Opensource ML embedded evaluation kit

Change-Id: I12e807f19f5cacad7cef82572b6dd48252fd61fd
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ba6dc28
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,17 @@
+# IDE related
+.vscode
+.idea
+
+# Downloaded files
+
+# Build files
+CMakeFiles
+build
+cmake-build-*
+
+# Virtual environments
+scripts/py/env*
+scripts/py/venv*
+env
+venv
+__pycache__*
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..06532f6
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,12 @@
+[submodule "dependencies/tensorflow"]
+	path = dependencies/tensorflow
+	url = https://github.com/tensorflow/tensorflow
+[submodule "dependencies/cmsis"]
+	path = dependencies/cmsis
+	url = https://github.com/ARM-software/CMSIS_5.git
+[submodule "dependencies/core_driver"]
+	path = dependencies/core_driver
+	url = https://review.mlplatform.org/ml/ethos-u/ethos-u-core-driver
+[submodule "dependencies/core-software"]
+	path = dependencies/core-software
+	url = https://review.mlplatform.org/ml/ethos-u/ethos-u-core-software
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..e2f109c
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,435 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+# CMake 3.15.0 is the minimum required version:
+# armclang support does not work in earlier releases.
+cmake_minimum_required(VERSION 3.15.0)
+
+# Build in release mode by default
+if (NOT CMAKE_BUILD_TYPE STREQUAL Debug)
+    set(CMAKE_BUILD_TYPE Release CACHE INTERNAL "")
+endif()
+
+message(STATUS "Build type is set to ${CMAKE_BUILD_TYPE}")
+
+# Set language standards. TensorFlow Lite requires
+# std=c++11.
+set(CMAKE_C_STANDARD   99)
+set(CMAKE_CXX_STANDARD 11)
+
+# Make the standard a requirement => prevent fallback to previous
+# supported standard
+set(CMAKE_C_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+# We want to pass standard C/C++ flags, without gnu extensions
+set(CMAKE_C_EXTENSIONS OFF)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+project(arm_ethos_u55_eval
+        VERSION     21.03
+        DESCRIPTION "ARM Ethos-U55 Evaluation application for MPS3 FPGA Prototyping Board and FastModel")
+
+add_compile_definitions(PRJ_VER_STR="${PROJECT_VERSION}")
+add_compile_definitions(PRJ_DES_STR="${PROJECT_DESCRIPTION}")
+
+set(CMAKE_SCRIPTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/)
+set(DOWNLOAD_DEP_DIR  ${CMAKE_BINARY_DIR}/dependencies)
+
+include(${CMAKE_SCRIPTS_DIR}/source_gen_utils.cmake)
+include(${CMAKE_SCRIPTS_DIR}/util_functions.cmake)
+
+if (${CMAKE_BINARY_DIR} STREQUAL ${CMAKE_SOURCE_DIR})
+    message(FATAL_ERROR "Source and build are in the same directory")
+else()
+    message(STATUS "Source directory: ${CMAKE_SOURCE_DIR}")
+    message(STATUS "Binary directory: ${CMAKE_BINARY_DIR}")
+endif()
+
+USER_OPTION(LOG_LEVEL "Log level for the application"
+    LOG_LEVEL_INFO
+    STRING)
+
+USER_OPTION(TENSORFLOW_SRC_PATH "Path to the root of the TensorFlow directory"
+    "${CMAKE_CURRENT_SOURCE_DIR}/dependencies/tensorflow"
+    PATH)
+
+USER_OPTION(TARGET_PLATFORM "Target platform to execute evaluation application: mps3, simple_platform, native"
+    mps3
+    STRING)
+
+USER_OPTION(TARGET_SUBSYSTEM "Specify platform target subsystem: sse-200, sse-300 or none"
+    sse-300
+    STRING)
+
+USER_OPTION(ETHOS_U55_ENABLED "Select if Ethos-U55 is available for the platform and subsystem"
+    ON
+    BOOL)
+
+USER_OPTION(USE_CASE_BUILD "Optional. Defines the use-case to build from the available sources. By default, all use-cases are built."
+    all
+    STRING)
+
+USER_OPTION(CPU_PROFILE_ENABLED "Output CPU performance profiling information. Should only be used for the MPS3 board."
+    OFF
+    BOOL)
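+
+# Example (illustrative only, not a prescribed configuration): the options above are
+# passed on the cmake command line when configuring a build. A cross-compiled MPS3
+# build limited to the keyword spotting use-case could, for instance, be configured as:
+#
+#   cmake .. -DTARGET_PLATFORM=mps3 \
+#            -DTARGET_SUBSYSTEM=sse-300 \
+#            -DUSE_CASE_BUILD=kws
+#
+# Multiple use-cases can be given as a semicolon-separated list, e.g.
+# -DUSE_CASE_BUILD="img_class;kws". Cross-compiling also requires the appropriate
+# toolchain file for the target; see the build documentation for details.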
+
+if (TARGET_PLATFORM STREQUAL mps3)
+    message(STATUS "Platform: MPS3 FPGA Prototyping Board or SSE-XXX FVP")
+elseif (TARGET_PLATFORM STREQUAL simple_platform)
+    message(STATUS "Platform: Simple platform within minimal peripherals")
+elseif (TARGET_PLATFORM STREQUAL native)
+    message(STATUS "Platform: Native (Linux based x86_64/aarch64 system)")
+else ()
+    message(FATAL_ERROR "Invalid platform specified: ${TARGET_PLATFORM}")
+endif ()
+
+enforce_compiler_version()
+setup_source_generator()
+
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+set(SRC_PATH ${CMAKE_CURRENT_SOURCE_DIR}/source)
+
+if (CPU_PROFILE_ENABLED)
+    set(PROFILING_OPT "${PROFILING_OPT} -DCPU_PROFILE_ENABLED")
+endif()
+
+# Include platform specific sources
+if (TARGET_PLATFORM STREQUAL native)
+    set(PLATFORM_SOURCES_CMAKE_FILE ${CMAKE_SCRIPTS_DIR}/${TARGET_PLATFORM}-sources.cmake)
+else ()
+    set(PLATFORM_SOURCES_CMAKE_FILE ${CMAKE_SCRIPTS_DIR}/bare-metal-sources.cmake)
+
+    USER_OPTION(CMSIS_SRC_PATH
+        "Path to CMSIS-5 sources"
+        "${CMAKE_CURRENT_SOURCE_DIR}/dependencies/cmsis"
+        PATH
+        )
+
+    if (CMAKE_BUILD_TYPE STREQUAL Debug AND CMAKE_CXX_COMPILER_ID STREQUAL ARMClang)
+        USER_OPTION(ARMCLANG_DEBUG_DWARF_LEVEL
+            "Dwarf conformance level for armclang toolchain"
+            "4" # Default = 4 (Arm-DS etc). For model debugger specify "3"
+            STRING
+            )
+    elseif (DEFINED ARMCLANG_DEBUG_DWARF_LEVEL)
+        message(WARNING "ARMCLANG_DEBUG_DWARF_LEVEL definition is unsupported"
+                        "within current configuration. Removing definition...")
+        unset(ARMCLANG_DEBUG_DWARF_LEVEL CACHE)
+    endif()
+
+endif ()
+message(STATUS "Including ${PLATFORM_SOURCES_CMAKE_FILE}")
+include(${PLATFORM_SOURCES_CMAKE_FILE})
+
+if (${CMAKE_CROSSCOMPILING})
+    enable_language(ASM)
+
+    # For non-native builds, we build with CMSIS-DSP support.
+    include(${CMAKE_SCRIPTS_DIR}/cmsis-dsp.cmake)
+
+    # All CMSIS headers to be used:
+    set(CMSIS_HEADERS
+        ${CMSIS_DSP_INC_DIR}
+        ${CMSIS_CORE_INC_DIR}
+        ${CMSIS_SRC_PATH}/Device/ARM/ARMCM55/Include
+        ${CMSIS_SRC_PATH}/Device/ARM/ARMCM55/Include/Template)
+endif ()
+
+# If we need NPU libraries:
+if (ETHOS_U55_ENABLED)
+
+    message(STATUS "Using ARM Ethos-U55 - adding core-driver and timing-adapter-driver includes and libraries")
+    USER_OPTION(ETHOS_U55_TIMING_ADAPTER_SRC_PATH
+        "Path to Ethos-U55 timing adapter sources"
+        "${CMAKE_CURRENT_SOURCE_DIR}/dependencies/core-software/drivers/timing_adapter"
+        PATH
+        )
+
+    USER_OPTION(ETHOS_U55_DRIVER_SRC_PATH
+        "Path to Ethos-U55 core driver sources"
+        "${CMAKE_CURRENT_SOURCE_DIR}/dependencies/core_driver"
+        PATH
+        )
+
+    include_directories("${ETHOS_U55_TIMING_ADAPTER_SRC_PATH}/include/")
+
+    add_subdirectory("${ETHOS_U55_TIMING_ADAPTER_SRC_PATH}" ${CMAKE_BINARY_DIR}/timing-adapter)
+
+    set(ETHOSU_INCLUDES ${ETHOS_U55_TIMING_ADAPTER_SRC_PATH}/include
+                        ${ETHOS_U55_DRIVER_SRC_PATH}/include)
+
+    list(APPEND ETHOS_U55_LIBS ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libtiming_adapter.a)
+endif ()
+
+include(${CMAKE_SCRIPTS_DIR}/tensorflow.cmake)
+
+set(DEP_TENSORFLOW_LITE_MICRO_SUB_DIR ${TENSORFLOW_SRC_PATH}/tensorflow/lite/micro)
+set(DEP_TENSORFLOW_LITE_MICRO_MAKE_DIR ${DEP_TENSORFLOW_LITE_MICRO_SUB_DIR}/tools/make/targets)
+set(DEP_FLATBUF_INCLUDE ${DEP_TENSORFLOW_LITE_MICRO_SUB_DIR}/tools/make/downloads/flatbuffers/include)
+
+set(TENSORFLOW_LIBRARY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME})
+
+set(DEP_TF_INCLUDE_DIRS
+    ${TENSORFLOW_SRC_PATH}
+    ${DEP_TENSORFLOW_LITE_MICRO_SUB_DIR}
+    ${ETHOSU_INCLUDES}
+    ${CMSIS_HEADERS}
+    )
+
+## All TPIP includes
+set(DEP_RUNTIME_INCLUDE_DIRS
+    ${DEP_TF_INCLUDE_DIRS}
+    ${DEP_FLATBUF_INCLUDE}
+    )
+
+# Our entry point into tensorflow world:
+file(GLOB_RECURSE SRC_TENSORFLOW_LITE_MICRO
+    ${SRC_PATH}/application/tensorflow-lite-micro/**/*.cc
+    ${SRC_PATH}/application/tensorflow-lite-micro/*.cc
+    )
+
+set(HAL_DIR ${SRC_PATH}/application/hal)
+
+# HAL API sources
+file(GLOB_RECURSE SRC_HAL
+    "${HAL_DIR}/hal.c"
+    )
+
+# Set platform specific HAL sources; these should be provided
+# by each platform's cmake include file
+list(APPEND SRC_HAL ${SRC_PLAT_HAL})
+
+# Include directories:
+set(APPLICATION_INCLUDE_DIRS
+    ${HAL_DIR}/include
+    ${SRC_PATH}/application/tensorflow-lite-micro/include
+    ${SRC_PATH}/application/main/include
+    ${PLAT_INCLUDE_DIRS}
+    )
+
+file(GLOB_RECURSE SRC_APPLICATION
+    "${SRC_PATH}/application/main/*.cc"
+    "${SRC_PATH}/application/main/*.cpp"
+    "${SRC_PATH}/application/main/*.c"
+    "${SRC_PATH}/application/main/**/*.cc"
+    "${SRC_PATH}/application/main/**/*.cpp"
+    "${SRC_PATH}/application/main/**/*.c"
+    )
+list(FILTER SRC_APPLICATION EXCLUDE REGEX ".*main\\.c.*$")
+
+list(JOIN USE_CASE_BUILD "" USE_CASE_BUILD_STR)
+if (${USE_CASE_BUILD_STR} STREQUAL all)
+    SUBDIRLIST(USE_CASES ${SRC_PATH}/use_case/)
+else()
+    set(USE_CASES ${USE_CASE_BUILD})
+endif()
+
+set(TEST_SRCS  ${CMAKE_CURRENT_SOURCE_DIR}/tests)
+if (NOT ${CMAKE_CROSSCOMPILING})
+
+    #Test TPIP
+    set(TEST_TPIP ${DOWNLOAD_DEP_DIR}/test)
+    file(MAKE_DIRECTORY ${TEST_TPIP})
+    set(TEST_TPIP_INCLUDE ${TEST_TPIP}/include)
+    file(MAKE_DIRECTORY ${TEST_TPIP_INCLUDE})
+
+    include(ExternalProject)
+
+    ExternalProject_Add(catch2-headers
+        URL https://github.com/catchorg/Catch2/releases/download/v2.11.1/catch.hpp
+        DOWNLOAD_NO_EXTRACT 1
+        CONFIGURE_COMMAND ""
+        BUILD_COMMAND bash -c "cp -R <DOWNLOAD_DIR>/catch.hpp ${TEST_TPIP_INCLUDE}"
+        INSTALL_COMMAND ""
+        )
+endif ()
+
+message(STATUS "Building use-cases: ${USE_CASES}.")
+foreach(use_case ${USE_CASES})
+
+    if (EXISTS ${SRC_PATH}/use_case/${use_case})
+        message(STATUS "Found sources for use-case ${use_case}")
+    else ()
+        message(FATAL_ERROR "Faild to find sources for ${use_case} in ${SRC_PATH}/use_case/${use_case}!")
+    endif ()
+    # Executable application:
+    set(TARGET_NAME "ethos-u-${use_case}")
+
+    set(DEFAULT_MODEL_DIR   ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/models)
+    set(SRC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/src)
+    set(INC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/include)
+
+    # Remove old files and recreate dirs
+    file(REMOVE_RECURSE ${SRC_GEN_DIR} ${INC_GEN_DIR})
+    file(MAKE_DIRECTORY ${SRC_GEN_DIR} ${INC_GEN_DIR})
+
+    file(GLOB_RECURSE UC_SRC
+        "${SRC_PATH}/use_case/${use_case}/src/*.cpp"
+        "${SRC_PATH}/use_case/${use_case}/src/*.cc"
+        "${SRC_PATH}/use_case/${use_case}/src/*.c"
+        "${SRC_PATH}/use_case/${use_case}/src/**/*.cpp"
+        "${SRC_PATH}/use_case/${use_case}/src/**/*.cc"
+        "${SRC_PATH}/use_case/${use_case}/src/**/*.c"
+        )
+
+    set(UC_INCLUDE
+        ${SRC_PATH}/use_case/${use_case}/include
+        )
+
+    file(GLOB UC_CMAKE_FILE
+        "${SRC_PATH}/use_case/${use_case}/*.cmake"
+        )
+
+    include(${UC_CMAKE_FILE})
+
+    file(GLOB_RECURSE SRC_GEN
+        "${SRC_GEN_DIR}/*.cc"
+        "${SRC_GEN_DIR}/*.cpp"
+        "${SRC_GEN_DIR}/*.c"
+        )
+
+    set(SRC_MAIN
+        "${SRC_PATH}/application/main/Main.cc"
+        )
+
+    set(UC_LIB_NAME lib${TARGET_NAME})
+
+    # Consolidated application static lib:
+    add_library(${UC_LIB_NAME} STATIC
+        ${SRC_APPLICATION}
+        ${SRC_TENSORFLOW_LITE_MICRO}
+        ${SRC_HAL}
+        ${UC_SRC}
+        ${SRC_GEN}
+        )
+    target_include_directories(${UC_LIB_NAME} PUBLIC
+        ${APPLICATION_INCLUDE_DIRS}
+        ${DEP_RUNTIME_INCLUDE_DIRS}
+        ${UC_INCLUDE}
+        ${INC_GEN_DIR}
+        )
+
+    # Set the activation buffer size
+    target_compile_definitions(${UC_LIB_NAME} PUBLIC
+            "ACTIVATION_BUF_SZ=${${use_case}_ACTIVATION_BUF_SZ}")
+
+    add_dependencies(${UC_LIB_NAME} tensorflow-lite-micro)
+
+    if (${CMAKE_CROSSCOMPILING})
+        # If we are building timing adapter, set the dependency:
+        if (ETHOS_U55_ENABLED)
+            message(STATUS "Adding timing_adapter as a dependency to ${UC_LIB_NAME}")
+            add_dependencies(${UC_LIB_NAME} timing_adapter)
+        endif()
+
+        # If building with CMSIS-DSP support:
+        if (DEFINED CMSIS_DSP_TARGET)
+            message(STATUS "Adding ${CMSIS_DSP_TARGET} as a dependency to ${UC_LIB_NAME}")
+            add_dependencies(${UC_LIB_NAME} ${CMSIS_DSP_TARGET})
+        endif()
+    endif()
+
+    target_link_libraries(${UC_LIB_NAME} PUBLIC
+        ${TENSORFLOW_LIBRARY}
+        $<$<BOOL:${ETHOS_U55_ENABLED}>:${ETHOS_U55_LIBS}>
+        $<$<BOOL:${CMSIS_DSP_LIB}>:${CMSIS_DSP_LIB}>)
+
+    add_executable(${TARGET_NAME} ${SRC_MAIN})
+
+    target_link_libraries(${TARGET_NAME} ${UC_LIB_NAME})
+
+    if (${CMAKE_CROSSCOMPILING})
+        set_target_properties(${TARGET_NAME} PROPERTIES SUFFIX ".axf")
+    endif()
+
+    if (${TARGET_PLATFORM} STREQUAL mps3)
+
+        SET(SECTORS_DIR ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/sectors/${use_case})
+        file(REMOVE_RECURSE ${SECTORS_DIR})
+        file(MAKE_DIRECTORY ${SECTORS_DIR})
+
+        add_custom_command(TARGET ${TARGET_NAME}
+            POST_BUILD
+            COMMAND fromelf --bin --output=${SECTORS_DIR}/
+            ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TARGET_NAME}.axf)
+
+        add_custom_target(
+            run-${use_case} ALL
+            COMMAND ${PYTHON} ${CMAKE_CURRENT_SOURCE_DIR}/scripts/py/gen_fpga_mem_map.py
+            --scatter_file_path ${SCAT_FILE}
+            --target_subsystem  ${TARGET_SUBSYSTEM}
+            --output_file_path  ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/images-${use_case}.txt
+            COMMENT "Generating FPGA mappings file")
+    elseif (${TARGET_PLATFORM} STREQUAL native)
+        # Add tests only if they exist for the use-case
+        if (EXISTS ${TEST_SRCS}/use_case/${use_case})
+
+            set(TEST_RESOURCES_INCLUDE
+                "${TEST_SRCS}/utils/"
+                "${TEST_SRCS}/resources/golden_fv/"
+                )
+
+            # Define Test sources and new target to run unit tests
+            file(GLOB_RECURSE TEST_SOURCES
+                "${TEST_SRCS}/common/*.cpp"
+                "${TEST_SRCS}/common/*.cc"
+                "${TEST_SRCS}/utils/*.cc"
+                "${TEST_SRCS}/utils/*.cpp"
+                "${TEST_SRCS}/use_case/${use_case}/*.cpp"
+                "${TEST_SRCS}/use_case/${use_case}/*.cc"
+                "${TEST_SRCS}/use_case/${use_case}/*.c"
+                "${TEST_SRCS}/use_case/${use_case}/**/*.cpp"
+                "${TEST_SRCS}/use_case/${use_case}/**/*.cc"
+                "${TEST_SRCS}/use_case/${use_case}/**/*.c"
+                )
+
+            if (DEFINED ${use_case}_TEST_IFM AND DEFINED ${use_case}_TEST_OFM)
+                message(STATUS  "Test vectors are available for ${${use_case}_MODEL_TFLITE_PATH} "
+                                "Input: ${${use_case}_TEST_IFM} "
+                                "Output: ${${use_case}_TEST_OFM}")
+
+                set(TEST_SRC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/src)
+                set(TEST_INC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/include)
+                file(GLOB_RECURSE TEST_SOURCES_GEN
+                    "${TEST_SRC_GEN_DIR}/*.cc"
+                    "${TEST_SRC_GEN_DIR}/**/*.cc"
+                    )
+                message(STATUS "Adding ${TEST_SOURCES_GEN} to test sources")
+                list(APPEND TEST_SOURCES ${TEST_SOURCES_GEN})
+                list(APPEND TEST_RESOURCES_INCLUDE ${TEST_INC_GEN_DIR})
+            endif()
+
+            set(TEST_TARGET_NAME "${CMAKE_PROJECT_NAME}-${use_case}-tests")
+            add_executable(${TEST_TARGET_NAME} ${TEST_SOURCES})
+            target_include_directories(${TEST_TARGET_NAME} PUBLIC
+                ${TEST_TPIP_INCLUDE} ${TEST_RESOURCES_INCLUDE})
+            target_link_libraries(${TEST_TARGET_NAME} libethos-u-${use_case})
+            target_compile_definitions(${TEST_TARGET_NAME} PRIVATE
+                "ACTIVATION_BUF_SZ=${${use_case}_ACTIVATION_BUF_SZ}"
+                TESTS)
+
+            add_dependencies(
+                "${TEST_TARGET_NAME}"
+                "catch2-headers"
+            )
+        endif ()
+    endif ()
+endforeach()
+
+print_useroptions()
diff --git a/LICENSE_APACHE_2.0.txt b/LICENSE_APACHE_2.0.txt
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/LICENSE_APACHE_2.0.txt
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/Readme.md b/Readme.md
new file mode 100644
index 0000000..db62515
--- /dev/null
+++ b/Readme.md
@@ -0,0 +1,89 @@
+# Arm® ML embedded evaluation kit
+
+This repository is for building and deploying Machine Learning (ML) applications targeted at Arm® Cortex®-M CPUs and the Arm®
+Ethos™-U NPU.
+To run evaluations using this software, we suggest using an [MPS3 board](https://developer.arm.com/tools-and-software/development-boards/fpga-prototyping-boards/mps3)
+or a Fixed Virtual Platform (FVP) that supports the Ethos-U55 software fast model. Both environments run a combination of
+the new [Arm® Cortex™-M55 processor](https://www.arm.com/products/silicon-ip-cpu/cortex-m/cortex-m55) and the
+[Arm® Ethos™-U55 NPU](https://www.arm.com/products/silicon-ip-cpu/ethos/ethos-u55).
+
+## Overview of the evaluation kit
+
+The purpose of the evaluation kit is to allow the user to develop software and test the performance of the Ethos-U55 NPU and
+Cortex-M55 CPU. The Ethos-U55 NPU is a new class of machine learning (ML) processor, specifically designed to accelerate
+computation for ML workloads in constrained embedded and IoT devices. The product is optimized to efficiently execute the
+mathematical operations commonly used in ML algorithms, such as convolutions and activation functions.
+
+## ML use cases
+
+The evaluation kit adds value by providing ready-to-use ML applications for the embedded stack. As a result, you can
+experiment with the already developed software use cases and create your own applications for Cortex-M CPU and Ethos-U NPU.
+The example applications at your disposal and the models they use are listed in the table below.
+
+|   ML application                     |  Description | Neural Network Model |
+| :----------------------------------: | :-----------------------------------------------------: | :----: |
+|  [Image classification](./docs/use_cases/img_class.md)              | Recognize the presence of objects in a given image | [Mobilenet V2](https://github.com/ARM-software/ML-zoo/blob/master/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8)   |
+|  [Keyword spotting (KWS)](./docs/use_cases/kws.md)             | Recognize the presence of a key word in a recording | [DS-CNN-L](https://github.com/ARM-software/ML-zoo/blob/master/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8) |
+|  [Automated Speech Recognition (ASR)](./docs/use_cases/asr.md) | Transcribe words in a recording | [Wav2Letter](https://github.com/ARM-software/ML-zoo/blob/master/models/speech_recognition/wav2letter/tflite_int8) |
+|  [KWS and ASR](./docs/use_cases/kws_asr.md) | Use Cortex-M and Ethos-U to transcribe words in a recording after a keyword is spotted | [DS-CNN-L](https://github.com/ARM-software/ML-zoo/blob/master/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8)  [Wav2Letter](https://github.com/ARM-software/ML-zoo/blob/master/models/speech_recognition/wav2letter/tflite_int8) |
+|  [Anomaly Detection](./docs/use_cases/ad.md)                 | Detect abnormal behavior based on a sound recording of a machine | Coming soon|
+| [Generic inference runner](./docs/use_cases/inference_runner.md) | Code block allowing you to develop your own use case for Ethos-U55 NPU | Your custom model |
+
+The above use cases implement an end-to-end ML flow, including data pre-processing and post-processing. They allow you
+to investigate the embedded software stack and evaluate the performance of the networks running on the Cortex-M55 CPU and
+Ethos-U55 NPU by displaying performance metrics such as inference cycle count estimates and the results of the network execution.
+
+## Software and hardware overview
+
+The evaluation kit is based on the [Arm® Corstone™-300 reference package](https://developer.arm.com/ip-products/subsystem/corstone/corstone-300).
+Arm® Corstone™-300 helps you build SoCs quickly on the Arm® Cortex™-M55 and Arm® Ethos™-U55 designs. Arm® Corstone™-300 design
+implementation is publicly available on an [Arm MPS3 FPGA board](https://developer.arm.com/tools-and-software/development-boards/fpga-prototyping-boards/download-fpga-images),
+or as a [Fixed Virtual Platform of the MPS3 development board](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+The Ethos-U NPU software stack is described [here](https://developer.arm.com/documentation/101888/0500/NPU-software-overview/NPU-software-components?lang=en).
+
+All ML use cases, albeit illustrating different applications, share common code such as initializing the Hardware
+Abstraction Layer (HAL). Thanks to the HAL, the application common code can run on either x86 or the Arm Cortex-M architecture.
+For the ML application-specific part, the Google® TensorFlow™ Lite for Microcontrollers inference engine is used to schedule
+the execution of neural network models. TensorFlow Lite for Microcontrollers is integrated with the [Ethos-U55 driver](https://git.mlplatform.org/ml/ethos-u/ethos-u-core-driver.git)
+and delegates execution of certain operators to the NPU or, if the neural network model operators are not supported on
+the NPU, to the CPU. [CMSIS-NN](https://github.com/ARM-software/CMSIS_5) is used to optimise CPU workload execution
+with the int8 data type.
+Common ML application functions help you focus on implementing the logic of your custom ML use case: you can modify
+only the use case code and leave all other components unchanged. The supplied build system discovers new ML application
+code and automatically includes it in the compilation flow.
+
+![APIs](./docs/media/APIs_description.png)
+
+To run an ML application on the Cortex-M CPU and Ethos-U55 NPU, follow these steps:
+
+1. Set up your environment by installing [the required prerequisites](./docs/sections/building.md#Build-prerequisites).
+2. Generate an optimized neural network model for Ethos-U with the Vela compiler by following the instructions [here](./docs/sections/building.md#Add-custom-model).
+3. [Configure the build system](./docs/sections/building.md#Build-process).
+4. [Compile the project](./docs/sections/building.md#Building-the-configured-project) with a `make` command (example commands are shown after this list).
+5. If using an FVP, [launch the desired application on the FVP](./docs/sections/deployment.md#Fixed-Virtual-Platform).
+If using the FPGA option, load the image on the FPGA and [launch the application](./docs/sections/deployment.md#MPS3-board).
+
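+As an illustrative sketch only (the exact options depend on your target, use case and toolchain;
+see the build documentation linked above), steps 3 and 4 typically boil down to:
+
+```commandline
+mkdir build && cd build
+cmake .. -DTARGET_PLATFORM=mps3 -DTARGET_SUBSYSTEM=sse-300 -DUSE_CASE_BUILD=kws
+make -j4
+```
+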
+To get familiar with these steps, you can follow the [quick start guide](docs/quick_start.md).
+
+For more details:
+
+- [Arm Ethos-U55 NPU Code Samples](docs/documentation.md)
+  - [Trademarks](docs/documentation.md#Trademarks)
+  - [Prerequisites](docs/documentation.md#Prerequisites)
+    - [Additional reading](docs/documentation.md#additional-reading)
+  - [Repository structure](docs/documentation.md#repository-structure)
+  - [Building](docs/documentation.md#building)
+  - [Deployment](docs/documentation.md#deployment)
+  - [Running sample applications](docs/documentation.md#running-code-samples-applications)
+  - [Implementing custom ML application](docs/documentation.md#implementing-custom-ML-application)
+  - [Testing and benchmarking](docs/documentation.md#testing-and-benchmarking)
+  - [Troubleshooting](docs/documentation.md#troubleshooting)
+  - [Appendix](docs/documentation.md#appendix)
+  - [Contribution guidelines](docs/documentation.md#contribution-guidelines)
+    - [Coding standards and guidelines](docs/documentation.md#coding-standards-and-guidelines)
+    - [Code Reviews](docs/documentation.md#code-reviews)
+    - [Testing](docs/documentation.md#testing)
+  - [Communications](docs/documentation.md#communications)
+  - [Licenses](docs/documentation.md#licenses)
+
diff --git a/dependencies/cmsis b/dependencies/cmsis
new file mode 160000
index 0000000..0d7e4fa
--- /dev/null
+++ b/dependencies/cmsis
@@ -0,0 +1 @@
+Subproject commit 0d7e4fa7131241a17e23dfae18140e0b2e77728f
diff --git a/dependencies/core-software b/dependencies/core-software
new file mode 160000
index 0000000..3a0d3f2
--- /dev/null
+++ b/dependencies/core-software
@@ -0,0 +1 @@
+Subproject commit 3a0d3f286be62b4933ba404187aff23cae166a5a
diff --git a/dependencies/core_driver b/dependencies/core_driver
new file mode 160000
index 0000000..8565d75
--- /dev/null
+++ b/dependencies/core_driver
@@ -0,0 +1 @@
+Subproject commit 8565d75b96a2f57f559f12dc0c68438bcfd276c8
diff --git a/dependencies/tensorflow b/dependencies/tensorflow
new file mode 160000
index 0000000..6cff09a
--- /dev/null
+++ b/dependencies/tensorflow
@@ -0,0 +1 @@
+Subproject commit 6cff09aee1f832d495b3cae40cab0de58155a0af
diff --git a/docs/documentation.md b/docs/documentation.md
new file mode 100644
index 0000000..655ef27
--- /dev/null
+++ b/docs/documentation.md
@@ -0,0 +1,390 @@
+# Arm Ethos-U55 NPU Code Samples
+
+## Table of Contents
+
+- [Arm Ethos-U55 NPU Code Samples](./documentation.md#arm-ethos-u55-npu-code-samples)
+  - [Table of Contents](./documentation.md#table-of-contents)
+  - [Trademarks](./documentation.md#trademarks)
+  - [Prerequisites](./documentation.md#prerequisites)
+    - [Additional reading](./documentation.md#additional-reading)
+  - [Repository structure](./documentation.md#repository-structure)
+  - [Models and resources](./documentation.md#models-and-resources)
+  - [Building](./documentation.md#building)
+  - [Deployment](./documentation.md#deployment)
+  - [Running code samples applications](./documentation.md#running-code-samples-applications)
+  - [Implementing custom ML application](./documentation.md#implementing-custom-ml-application)
+  - [Testing and benchmarking](./documentation.md#testing-and-benchmarking)
+  - [Troubleshooting](./documentation.md#troubleshooting)
+  - [Appendix](./documentation.md#appendix)
+  - [Contribution guidelines](./documentation.md#contribution-guidelines)
+    - [Coding standards and guidelines](./documentation.md#coding-standards-and-guidelines)
+    - [Code Reviews](./documentation.md#code-reviews)
+    - [Testing](./documentation.md#testing)
+  - [Licenses](./documentation.md#licenses)
+
+## Trademarks
+
+- Arm® and Cortex® are registered trademarks of Arm® Limited (or its subsidiaries) in the US and/or elsewhere.
+- Arm® and Ethos™ are registered trademarks or trademarks of Arm® Limited (or its subsidiaries) in the US and/or elsewhere.
+- Arm® and Corstone™ are registered trademarks or trademarks of Arm® Limited (or its subsidiaries) in the US and/or elsewhere.
+- TensorFlow™, the TensorFlow logo and any related marks are trademarks of Google Inc.
+
+## Prerequisites
+
+Before starting the setup process, please make sure that you have:
+
+- A Linux x86_64 based machine or Windows Subsystem for Linux is
+    preferable. Windows can be used as a build environment but cannot
+    run Fast Model simulations.
+
+- An Arm Compiler license (version 6.14 or above).
+
+  - [Arm Compiler Download
+        Page](https://developer.arm.com/tools-and-software/embedded/arm-compiler/downloads/)
+
+- An Arm® MPS3 FPGA prototyping board and components for FPGA evaluation or a `Fixed Virtual Platform` binary:
+  - An MPS3 board loaded with the Arm® Corstone™-300 reference package (`AN547`) from:
+    <https://developer.arm.com/tools-and-software/development-boards/fpga-prototyping-boards/download-fpga-images>.
+    You will also need a USB connection between your machine and the MPS3 board - for the UART menu and for
+    deploying the application.
+  - `Arm Corstone-300` based FVP for MPS3 is available from: <https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps>.
+
+### Additional reading
+
+This document contains information that is specific to Arm® Ethos™-U55 products.
+See the following documents for other relevant information:
+
+- ML platform overview: <https://mlplatform.org/>
+
+- Arm® ML processors technical overview: <https://developer.arm.com/ip-products/processors/machine-learning>
+
+- Arm® Cortex®-M55 processor: <https://www.arm.com/products/silicon-ip-cpu/cortex-m/cortex-m55>
+
+- ML processor, also referred to as a Neural Processing Unit (NPU) - Arm® Ethos™-U55:
+    <https://www.arm.com/products/silicon-ip-cpu/ethos/ethos-u55>
+
+- Arm® MPS3 FPGA Prototyping Board:
+    <https://developer.arm.com/tools-and-software/development-boards/fpga-prototyping-boards/mps3>
+
+- Arm® ML-Zoo: <https://github.com/ARM-software/ML-zoo/>
+
+See <http://developer.arm.com> for access to Arm documentation.
+
+
+## Repository structure
+
+The repository has the following structure:
+
+```tree
+.
+├── dependencies
+├── docs
+├── scripts
+│   └── ...
+├── source
+│   ├── application
+│   │ ├── hal
+│   │ ├── main
+│   │ └── tensorflow-lite-micro
+│   └── use_case
+│     └── <usecase_name>
+│          ├── include
+│          ├── src
+│          └── usecase.cmake
+├── tests
+│   └── ...
+└── CMakeLists.txt
+```
+
+Where:
+
+- `dependencies`: contains all the third party dependencies for this project.
+
+- `docs`: contains the documentation for these ML applications.
+
+- `scripts`: contains build related and source generation scripts.
+
+- `source`: contains C/C++ sources for the platform and ML applications.
+    Common code related to the Ethos-U55 NPU software
+    framework resides in the *application* sub-folder with the following
+    structure:
+
+  - `application`: contains all the sources that form the *core* of the application.
+    The `use case` part of the sources depends on sources here.
+
+    - `hal`: contains hardware abstraction layer sources providing a
+        platform agnostic API to access hardware platform specific functions.
+
+    - `main`: contains the main function and calls to platform initialization
+          logic to set things up before launching the main loop.
+          It also contains sources common to all use case implementations.
+
+    - `tensorflow-lite-micro`: contains abstraction around TensorFlow Lite Micro API
+          implementing common functions to initialize a neural network model, run an inference, and
+          access inference results.
+
+  - `use_case`: contains the ML use-case specific logic. Having this as a separate sub-folder isolates the ML-specific
+    application logic, with the assumption that the `application` code does all the required set-up for the logic here to run.
+    It also makes it easier to add a new use case block; a minimal `usecase.cmake` sketch is shown after this list.
+
+- `tests`: contains the x86 tests for the use case applications.
+
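+Each use-case folder also carries its own `usecase.cmake` file (as shown in the tree above), which the
+top-level build discovers and includes. As a minimal sketch, using a hypothetical use-case named
+`hello_world`: the top-level `CMakeLists.txt` in this patch expects every use-case to define
+`<use_case>_ACTIVATION_BUF_SZ`; real use-cases typically also set model and resource generation options.
+
+```cmake
+# usecase.cmake for a hypothetical "hello_world" use-case:
+# size, in bytes, of the activation buffer (tensor arena) made available to the model.
+set(hello_world_ACTIVATION_BUF_SZ 0x00200000)
+```
+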
+Hardware abstraction layer has the following structure:
+
+```tree
+hal
+├── hal.c
+├── include
+│   └── ...
+└── platforms
+    ├── bare-metal
+    │   ├── bsp
+    │   │   ├── bsp-core
+    │   │   │   └── include
+    │   │   ├── bsp-packs
+    │   │   │   └── mps3
+    │   │   ├── cmsis-device
+    │   │   ├── include
+    │   │   └── mem_layout
+    │   ├── data_acquisition
+    │   ├── data_presentation
+    │   │   ├── data_psn.c
+    │   │   └── lcd
+    │   │       └── include
+    │   ├── images
+    │   ├── timer
+    │   └── utils
+    └── native
+```
+
+- `include` and `hal.c`: contains the hardware abstraction layer (HAL) top level platform API and data acquisition, data
+presentation and timer interfaces.
+    > Note: the files here and lower in the hierarchy have been written in
+    C and this layer is a clean C/C++ boundary in the sources.
+
+- `platforms/bare-metal/data_acquisition`\
+`platforms/bare-metal/data_presentation`\
+`platforms/bare-metal/timer`\
+`platforms/bare-metal/utils`: contains bare metal HAL support layer and platform initialisation helpers. Function calls
+  are routed to platform specific logic at this level. For example, for data presentation, an `lcd` module has been used.
+  This wraps the LCD driver calls for the actual hardware (for example MPS3).
+
+- `platforms/bare-metal/bsp/bsp-packs`: contains the core low-level drivers (written in C) for the platform.
+  For the supplied examples this happens to be an MPS3 board, but support could be added here for other platforms too.
+  The functions defined in this space are wired to the higher level functions under HAL (such as those at the `platforms/bare-metal/` level).
+
+- `platforms/bare-metal/bsp/bsp-packs/mps3/include`\
+`platforms/bare-metal/bsp/bsp-packs/mps3`: contains the peripheral (LCD, UART and timer) drivers specific to MPS3 board.
+
+- `platforms/bare-metal/bsp/bsp-core`\
+`platforms/bare-metal/bsp/include`: contains the BSP core sources common to all BSPs. These include a UART header
+  (only the implementation of this is platform specific, but the API is common) and "re-targeting" of the standard output
+  and error streams to the UART block.
+
+- `platforms/bare-metal/bsp/cmsis-device`: contains the CMSIS template implementation for the CPU and also device
+  initialisation routines. It is also where the system interrupts are set up and handlers are overridden.
+  The main entry point of a bare metal application will most likely reside in this space. This entry point is
+  responsible for setting up the system before calling the user defined "main" function in the higher level `application` logic.
+
+- `platforms/bare-metal/bsp/mem_layout`: contains the platform specific linker scripts.
+
+### Models and resources
+
+The models used in the use cases implemented in this project can be downloaded
+from [Arm ML-Zoo](https://github.com/ARM-software/ML-zoo/).
+
+- [Mobilenet V2](https://github.com/ARM-software/ML-zoo/blob/master/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8).
+- [DS-CNN](https://github.com/ARM-software/ML-zoo/blob/master/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8).
+- [Wav2Letter](https://github.com/ARM-software/ML-zoo/blob/master/models/speech_recognition/wav2letter/tflite_int8).
+- Anomaly Detection (coming soon).
+
+When using the Ethos-U55 backend, the NN model is assumed to have been optimized by the Vela compiler.
+If it has not, the model will fall back to the CPU and execute there, provided its operators are supported by TensorFlow Lite Micro.
+
+![Vela compiler](./media/vela_flow.jpg)
+
+The Vela compiler is a tool that can optimize a neural network model
+into a version that can run on an embedded system containing Ethos-U55.
+
+The optimized model will contain custom operators for sub-graphs of the
+model that can be accelerated by Ethos-U55; the remaining layers that
+cannot be accelerated are left unchanged and will run on the CPU using
+optimized (CMSIS-NN) or reference kernels provided by the inference
+engine.
+
+For detailed information see [Optimize model with Vela compiler](./sections/building.md#Optimize-custom-model-with-Vela-compiler).
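+
+As an illustration only (option names and accelerator configurations vary between Vela releases,
+so consult `vela --help` and the section linked above), optimizing a model for a 128 MAC Ethos-U55
+configuration looks roughly like this:
+
+```commandline
+pip install ethos-u-vela
+vela ds_cnn_clustered_int8.tflite --accelerator-config=ethos-u55-128 --output-dir=./vela_out
+```
+
+Vela typically writes the optimized model to the output directory with a `_vela.tflite` suffix, and that
+file can then be used in place of the original `.tflite` file.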
+
+## Building
+
+This section describes how to build the code sample applications from sources - illustrating the build
+options and the process.
+
+The project can be built for the MPS3 FPGA board and for an FVP emulating MPS3. The default values for the configuration
+parameters will build executables with Ethos-U55 support.
+See:
+
+- [Building](./sections/building.md)
+  - [Build prerequisites](./sections/building.md#build-prerequisites)
+  - [Build options](./sections/building.md#build-options)
+  - [Build Process](./sections/building.md#build-process)
+    - [Preparing build environment](./sections/building.md#Preparing-build-environment)
+    - [Create a build directory](./sections/building.md#Create-a-build-directory)
+    - [Configuring the build for `MPS3: SSE-300`](./sections/building.md#Configuring-the-build-for-`MPS3:-SSE-300`)
+    - [Configuring build for different Arm Ethos-U55 configurations](./sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations)
+    - [Configuring the build for `MPS3: SSE-200`](./sections/building.md#Configuring-the-build-for-`MPS3:-SSE-200`)
+    - [Configuring the build native unit-test](./sections/building.md#configuring-the-build-native-unit-test)
+    - [Configuring the build for `simple_platform`](./sections/building.md#configuring-the-build-for-`simple_platform`)
+    - [Building the configured project](./sections/building.md#Building-the-configured-project)
+  - [Building timing adapter with custom options](./sections/building.md#building-timing-adapter-with-custom-options)
+  - [Add custom inputs](./sections/building.md#add-custom-inputs)
+  - [Add custom model](./sections/building.md#add-custom-model)
+  - [Optimize custom model with Vela compiler](./sections/building.md#Optimize-custom-model-with-Vela-compiler)
+  - [Memory constraints](./sections/building.md#memory-constraints)
+  - [Automatic file generation](./sections/building.md#automatic-file-generation)
+
+## Deployment
+
+This section describes how to deploy the code sample applications on the Fixed Virtual Platform or the MPS3 board.
+See:
+
+- [Deployment](./sections/deployment.md)
+  - [Fixed Virtual Platform](./sections/deployment.md#fixed-Virtual-Platform)
+    - [Setting up the MPS3 Corstone-300 FVP](./sections/deployment.md#Setting-up-the-MPS3-Corstone-300-FVP)
+    - [Deploying on an FVP emulating MPS3](./sections/deployment.md#Deploying-on-an-FVP-emulating-MPS3)
+  - [MPS3 board](./sections/deployment.md#MPS3-board)
+    - [Deployment on MPS3 board](./sections/deployment.md#Deployment-on-MPS3-board)
+
+## Running code samples applications
+
+This section covers the process for getting started with pre-built binaries for the code samples.
+See [Running applications](./sections/run.md).
+
+## Implementing custom ML application
+
+This section describes how to implement a custom Machine Learning application running
+on a platform supported by the repository (Fixed Virtual Platform or an MPS3 board).
+
+The Ethos-U55 NPU Code Samples software project offers a simple way to incorporate additional
+use-case code into the existing infrastructure. It provides a build
+system that automatically picks up added functionality and produces a
+corresponding executable for each use-case.
+
+See:
+
+- [Customizing](./sections/customizing.md)
+  - [Software project description](./sections/customizing.md#Software-project-description)
+  - [HAL API](./sections/customizing.md#hal-api)
+  - [Main loop function](./sections/customizing.md#main-loop-function)
+  - [Application context](./sections/customizing.md#application-context)
+  - [Profiler](./sections/customizing.md#Profiler)
+  - [NN Model API](./sections/customizing.md#NN-model-API)
+  - [Adding custom ML use-case](./sections/customizing.md#Adding-custom-ML-use-case)
+  - [Implementing main loop](./sections/customizing.md#Implementing-main-loop)
+  - [Implementing custom NN model](./sections/customizing.md#Implementing-custom-NN-model)
+  - [Executing inference](./sections/customizing.md#executing-inference)
+  - [Printing to console](./sections/customizing.md#printing-to-console)
+  - [Reading user input from console](./sections/customizing.md#reading-user-input-from-console)
+  - [Output to MPS3 LCD](./sections/customizing.md#output-to-MPS3-LCD)
+  - [Building custom use-case](./sections/customizing.md#building-custom-use-case)
+
+## Testing and benchmarking
+
+See [Testing and benchmarking](./sections/testing_benchmarking.md).
+
+## Troubleshooting
+
+See:
+
+- [Troubleshooting](./sections/troubleshooting.md)
+  - [Inference results are incorrect for my custom files](./sections/troubleshooting.md#Inference-results-are-incorrect-for-my-custom-files)
+  - [The application does not work with my custom model](./sections/troubleshooting.md#The-application-does-not-work-with-my-custom-model)
+
+## Appendix
+
+See:
+
+- [Appendix](./sections/appendix.md)
+  - [Cortex-M55 Memory map overview](./sections/appendix.md#cortex-m55-memory-map-overview)
+
+## Contribution guidelines
+
+Contributions are only accepted under the following conditions:
+
+- The contribution has a certified origin and gives us your permission. To manage this process we use the
+  [Developer Certificate of Origin (DCO) V1.1](https://developercertificate.org/).
+  To indicate that contributors agree to the terms of the DCO, it is necessary to "sign off" the
+  contribution by adding a line with your name and e-mail address to every git commit message:
+
+  ```log
+  Signed-off-by: John Doe <john.doe@example.org>
+  ```
+
+  This can be done automatically by adding the `-s` option to your `git commit` command.
+  You must use your real name; no pseudonyms or anonymous contributions are accepted.
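+
+  For example, to sign off a commit with an illustrative message:
+
+  ```commandline
+  git commit -s -m "Add my use-case"
+  ```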
+
+- You give permission according to the [Apache License 2.0](../LICENSE_APACHE_2.0.txt).
+
+  In each source file, include the following copyright notice:
+
+  ```copyright
+  /*
+  * Copyright (c) <years additions were made to project> <your name>, Arm Limited. All rights reserved.
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+  ```
+
+### Coding standards and guidelines
+
+This repository follows a set of guidelines, best practices, programming styles and conventions,
+see:
+
+- [Coding standards and guidelines](./sections/coding_guidelines.md)
+  - [Introduction](./sections/coding_guidelines.md#introduction)
+  - [Language version](./sections/coding_guidelines.md#language-version)
+  - [File naming](./sections/coding_guidelines.md#file-naming)
+  - [File layout](./sections/coding_guidelines.md#file-layout)
+  - [Block Management](./sections/coding_guidelines.md#block-management)
+  - [Naming Conventions](./sections/coding_guidelines.md#naming-conventions)
+    - [C++ language naming conventions](./sections/coding_guidelines.md#c_language-naming-conventions)
+    - [C language naming conventions](./sections/coding_guidelines.md#c-language-naming-conventions)
+  - [Layout and formatting conventions](./sections/coding_guidelines.md#layout-and-formatting-conventions)
+  - [Language usage](./sections/coding_guidelines.md#language-usage)
+
+### Code Reviews
+
+Contributions must go through code review. Code reviews are performed through the
+[mlplatform.org Gerrit server](https://review.mlplatform.org). Contributors need to signup to this
+Gerrit server with their GitHub account credentials.
+In order to be merged a patch needs to:
+
+- get a "+1 Verified" from the pre-commit job.
+- get a "+2 Code-review" from a reviewer, it means the patch has the final approval.
+
+### Testing
+
+Prior to submitting a patch for review, please make sure that all build variants work and unit tests pass.
+Contributions go through testing at the continuous integration system. All builds, tests and checks must pass before a
+contribution gets merged to the master branch.
+
+## Licenses
+
+The ML Embedded applications samples are provided under the Apache 2.0 license, see [License Apache 2.0](../LICENSE_APACHE_2.0.txt).
+
+Application input data sample files are provided under their original license:
+
+| Sample files | License | Provenance |
+|---------------|---------|---------|
+| [Automatic Speech Recognition Samples](../resources/asr/samples/files.md) | [Creative Commons Attribution 4.0 International Public License](../resources/LICENSE_CC_4.0.txt) | <http://www.openslr.org/12/> |
+| [Image Classification Samples](../resources/img_class/samples/files.md) | [Creative Commons Attribution 1.0](../resources/LICENSE_CC_1.0.txt) | <https://www.pexels.com> |
+| [Keyword Spotting Samples](../resources/kws/samples/files.md) | [Creative Commons Attribution 4.0 International Public License](../resources/LICENSE_CC_4.0.txt) | <http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz> |
+| [Keyword Spotting and Automatic Speech Recognition Samples](../resources/kws_asr/samples/files.md) | [Creative Commons Attribution 4.0 International Public License](../resources/LICENSE_CC_4.0.txt) | <http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz> |
diff --git a/docs/media/APIs_description.png b/docs/media/APIs_description.png
new file mode 100644
index 0000000..57e2b32
--- /dev/null
+++ b/docs/media/APIs_description.png
Binary files differ
diff --git a/docs/media/ASR_preprocessing.png b/docs/media/ASR_preprocessing.png
new file mode 100644
index 0000000..3383a2e
--- /dev/null
+++ b/docs/media/ASR_preprocessing.png
Binary files differ
diff --git a/docs/media/F1.png b/docs/media/F1.png
new file mode 100644
index 0000000..b843e1e
--- /dev/null
+++ b/docs/media/F1.png
Binary files differ
diff --git a/docs/media/F2.png b/docs/media/F2.png
new file mode 100644
index 0000000..ab903e8
--- /dev/null
+++ b/docs/media/F2.png
Binary files differ
diff --git a/docs/media/F3.png b/docs/media/F3.png
new file mode 100644
index 0000000..0effcb7
--- /dev/null
+++ b/docs/media/F3.png
Binary files differ
diff --git a/docs/media/F4.png b/docs/media/F4.png
new file mode 100644
index 0000000..c7f6ac1
--- /dev/null
+++ b/docs/media/F4.png
Binary files differ
diff --git a/docs/media/KWS_preprocessing.png b/docs/media/KWS_preprocessing.png
new file mode 100644
index 0000000..7a6f3fd
--- /dev/null
+++ b/docs/media/KWS_preprocessing.png
Binary files differ
diff --git a/docs/media/fvp.png b/docs/media/fvp.png
new file mode 100644
index 0000000..ca4ffa5
--- /dev/null
+++ b/docs/media/fvp.png
Binary files differ
diff --git a/docs/media/fvpterminal.png b/docs/media/fvpterminal.png
new file mode 100644
index 0000000..ff39152
--- /dev/null
+++ b/docs/media/fvpterminal.png
Binary files differ
diff --git a/docs/media/mps3.png b/docs/media/mps3.png
new file mode 100644
index 0000000..3fb0dff
--- /dev/null
+++ b/docs/media/mps3.png
Binary files differ
diff --git a/docs/media/vela_flow.jpg b/docs/media/vela_flow.jpg
new file mode 100644
index 0000000..1f052ee
--- /dev/null
+++ b/docs/media/vela_flow.jpg
Binary files differ
diff --git a/docs/quick_start.md b/docs/quick_start.md
new file mode 100644
index 0000000..f557c72
--- /dev/null
+++ b/docs/quick_start.md
@@ -0,0 +1,95 @@
+# Quick start example ML application
+
+This is a quick start guide that shows how to run the keyword spotting example application. The aim of this guide
+is to illustrate the flow of running an application on the evaluation kit rather than to showcase the keyword spotting
+functionality or performance. All use cases in the evaluation kit follow the same steps.
+
+1. Verify you have installed [the required prerequisites](sections/building.md#Build-prerequisites).
+
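+    For example, a quick way to check that the main tools from the prerequisites are available on your path:
+
+    ```commandline
+    armclang -v
+    cmake --version
+    python3 --version
+    make --version
+    ```
+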
+2. Clone the Ethos-U55 evaluation kit repository.
+
+    ```commandline
+    git clone "https://review.mlplatform.org/ml/ethos-u/ml-embedded-evaluation-kit"
+    cd ml-embedded-evaluation-kit
+    ```
+
+3. Pull all the external dependencies with the commands below:
+
+    ```commandline
+    git submodule update --init
+    ```
+
+4. Next, you will need a neural network model. For the purpose of this quick start guide, we'll use the
+    `ds_cnn_clustered_int8` keyword spotting model from the [Arm public model zoo](https://github.com/ARM-software/ML-zoo);
+    the principle remains the same for all of the other use cases. Download the `ds_cnn_clustered_int8.tflite` model
+    file with the curl command below:
+
+    ```commandline
+    curl -L https://github.com/ARM-software/ML-zoo/blob/master/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8/ds_cnn_clustered_int8.tflite?raw=true --output ds_cnn_clustered_int8.tflite
+    ```
+
+5. [Vela](https://review.mlplatform.org/plugins/gitiles/ml/ethos-u/ethos-u-vela) is an open-source Python tool that converts
+    a TensorFlow Lite for Microcontrollers neural network model into an optimized model that can run on an embedded system
+    containing an Ethos-U55 NPU. It is worth noting that, in order to take full advantage of the capabilities of the NPU, the
+    neural network operators should be [supported by Vela](https://review.mlplatform.org/plugins/gitiles/ml/ethos-u/ethos-u-vela/+/HEAD/SUPPORTED_OPS.md).
+    In this step, you will compile the model with Vela.
+
+    For this step, you need to ensure you have [correctly installed the Vela package](https://pypi.org/project/ethos-u-vela/):
+
+    ```commandline
+    python3 -m venv env
+    source ./env/bin/activate
+    pip install --upgrade pip
+    pip install ethos-u-vela
+    ```
+
+    In the command below, we specify that we are using the Arm® Ethos™-U55 NPU with 128 Multiply-Accumulate units
+    (MAC units) configured for a High End Embedded use case. The [building section](sections/building.md#Optimize-custom-model-with-Vela-compiler)
+    has a more detailed explanation of Vela usage.
+
+    ```commandline
+    vela ds_cnn_clustered_int8.tflite \
+        --accelerator-config=ethos-u55-128 \
+        --block-config-limit=0 \
+        --config scripts/vela/vela.ini \
+        --memory-mode Shared_Sram \
+        --system-config Ethos_U55_High_End_Embedded
+    ```
+
+    An optimized model file for Ethos-U55 is generated in a folder named `output`.
+
+6. Create a `build` folder in the root level of the evaluation kit.
+
+    ```commandline
+    mkdir build && cd build
+    ```
+
+7. Generate the makefiles with `CMake` as shown in the command below. The [build process section](sections/building.md#Build-process)
+    gives an in-depth explanation of the meaning of every parameter. For the time being, note that we point to the Vela
+    optimized model from stage 5 with the `-Dkws_MODEL_TFLITE_PATH` parameter.
+
+    ```commandline
+    cmake \
+        -DTARGET_PLATFORM=mps3 \
+        -DTARGET_SUBSYSTEM=sse-300 \
+        -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+        -DUSE_CASE_BUILD=kws \
+        -Dkws_MODEL_TFLITE_PATH=output/ds_cnn_clustered_int8_vela.tflite \
+        ..
+    ```
+
+8. Compile the project with `make`. Details about this stage can be found in the [building part of the documentation](sections/building.md#Building-the-configured-project).
+
+    ```commandline
+    make -j4
+    ```
+
+9. Launch the project as explained [here](sections/deployment.md#Deployment). In this quick-start guide, we'll use the Fixed
+    Virtual Platform. Pass the `bin/ethos-u-kws.axf` file generated in stage 8 to the FVP that you downloaded when
+    installing the prerequisites.
+
+    ```commandline
+    <path_to_FVP>/FVP_Corstone_SSE-300_Ethos-U55 -a ./bin/ethos-u-kws.axf
+    ```
+
+10. A telnet window is launched through which you can interact with the application and obtain performance figures.
diff --git a/docs/sections/appendix.md b/docs/sections/appendix.md
new file mode 100644
index 0000000..7b56faa
--- /dev/null
+++ b/docs/sections/appendix.md
@@ -0,0 +1,20 @@
+# Appendix
+
+## Arm® Cortex®-M55 Memory map overview for Corstone™-300 reference design
+
+The table below is the memory mapping information specific to the Arm® Cortex®-M55.
+
+| Name  | Base address | Limit address |  Size     | IDAU |  Remarks                                                  |
+|-------|--------------|---------------|-----------|------|-----------------------------------------------------------|
+| ITCM  | 0x0000_0000  |  0x0007_FFFF  |  512 kiB  |  NS  |   ITCM code region                                        |
+| BRAM  | 0x0100_0000  |  0x011F_FFFF  |  2 MiB    |  NS  |   FPGA data SRAM region                                   |
+| DTCM  | 0x2000_0000  |  0x2007_FFFF  |  512 kiB  |  NS  |   4 banks of 128 kiB each                                 |
+| SRAM  | 0x2100_0000  |  0x213F_FFFF  |  4 MiB    |  NS  |   2 banks of 2 MiB each as SSE-300 internal SRAM region   |
+| DDR   | 0x6000_0000  |  0x6FFF_FFFF  |  256 MiB  |  NS  |   DDR memory region                                       |
+| ITCM  | 0x1000_0000  |  0x1007_FFFF  |  512 kiB  |  S   |   ITCM code region                                        |
+| BRAM  | 0x1100_0000  |  0x111F_FFFF  |  2 MiB    |  S   |   FPGA data SRAM region                                   |
+| DTCM  | 0x3000_0000  |  0x3007_FFFF  |  512 kiB  |  S   |   4 banks of 128 kiB each                                 |
+| SRAM  | 0x3100_0000  |  0x313F_FFFF  |  4 MiB    |  S   |   2 banks of 2 MiB each as SSE-300 internal SRAM region   |
+| DDR   | 0x7000_0000  |  0x7FFF_FFFF  |  256 MiB  |  S   |   DDR memory region                                       |
+
+The default memory map can be found here: <https://developer.arm.com/documentation/101051/0002/Memory-model/Memory-map>
\ No newline at end of file
diff --git a/docs/sections/building.md b/docs/sections/building.md
new file mode 100644
index 0000000..56771b8
--- /dev/null
+++ b/docs/sections/building.md
@@ -0,0 +1,1023 @@
+# Building the Code Samples application from sources
+
+## Contents
+
+- [Building the Code Samples application from sources](#building-the-code-samples-application-from-sources)
+  - [Contents](#contents)
+  - [Build prerequisites](#build-prerequisites)
+  - [Build options](#build-options)
+  - [Build process](#build-process)
+    - [Preparing build environment](#preparing-build-environment)
+    - [Create a build directory](#create-a-build-directory)
+    - [Configuring the build for `MPS3: SSE-300`](#configuring-the-build-for-mps3-sse-300)
+    - [Configuring the build for `MPS3: SSE-200`](#configuring-the-build-for-mps3-sse-200)
+    - [Configuring the build native unit-test](#configuring-the-build-native-unit-test)
+    - [Configuring the build for `simple_platform`](#configuring-the-build-for-simple_platform)
+    - [Building the configured project](#building-the-configured-project)
+  - [Building timing adapter with custom options](#building-timing-adapter-with-custom-options)
+  - [Add custom inputs](#add-custom-inputs)
+  - [Add custom model](#add-custom-model)
+  - [Optimize custom model with Vela compiler](#optimize-custom-model-with-vela-compiler)
+  - [Memory constraints](#memory-constraints)
+  - [Automatic file generation](#automatic-file-generation)
+
+This section assumes the use of an **x86 Linux** build machine.
+
+## Build prerequisites
+
+Before proceeding, please make sure that the following prerequisites
+are fulfilled:
+
+- Arm Compiler version 6.14 or above is installed and available on the
+    path.
+
+    Test the compiler by running:
+
+    ```commandline
+    armclang -v
+    ```
+
+    ```log
+    Product: ARM Compiler 6.14 Professional
+    Component: ARM Compiler 6.14
+    ```
+
+    > **Note:** Add compiler to the path, if needed:
+    >
+    > `export PATH=/path/to/armclang/bin:$PATH`
+
+- Compiler license is configured correctly
+
+- CMake version 3.15 or above is installed and available on the path.
+    Test CMake by running:
+
+    ```commandline
+    cmake --version
+    ```
+
+    ```log
+    cmake version 3.16.2
+    ```
+
+    > **Note:** Add cmake to the path, if needed:
+    >
+    > `export PATH=/path/to/cmake/bin:$PATH`
+
+- Python 3.6 or above is installed. Test python version by running:
+
+    ```commandline
+    python3 --version
+    ```
+
+    ```log
+    Python 3.6.8
+    ```
+
+- The build system will create a Python virtual environment during the build
+    process. Please make sure that the Python virtual environment module is
+    installed:
+
+    ```commandline
+    python3 -m venv
+    ```
+
+- Make (or MinGW make for Windows) is installed and available on the path:
+
+    ```commandline
+    make --version
+    ```
+
+    ```log
+    GNU Make 4.1
+
+    ...
+    ```
+
+    > **Note:** Add it to the path environment variable, if needed.
+
+- Access to the Internet to download the third party dependencies, specifically: TensorFlow Lite Micro, Arm Ethos-U55
+driver and CMSIS. Instructions for downloading these are listed under [preparing build environment](#preparing-build-environment).
+
+## Build options
+
+The project build system allows the user to specify a custom NN
+model (in `.tflite` format) or images and to compile the application binary from
+sources.
+
+The build system uses pre-built TensorFlow Lite for Microcontrollers
+library and Arm® Ethos™-U55 driver libraries from the delivery package.
+
+The build script is parameterized to support different options. Default
+values for build parameters will build the executable compatible with
+the Ethos-U55 Fast Model.
+
+The build parameters are:
+
+- `TARGET_PLATFORM`: Target platform to execute the application on:
+  - `mps3`
+  - `native`
+  - `simple_platform`
+
+- `TARGET_SUBSYSTEM`: Platform target subsystem; this specifies the
+    design implementation for the deployment target. For both the MPS3
+    FVP and the MPS3 FPGA, this should be left at the default value of
+    SSE-300:
+  - `sse-300` (default - [Arm® Corstone™-300](https://developer.arm.com/ip-products/subsystem/corstone/corstone-300))
+  - `sse-200`
+
+- `TENSORFLOW_SRC_PATH`: Path to the root of the TensorFlow directory.
+    The default value points to the TensorFlow submodule in the
+    [ethos-u](https://git.mlplatform.org/ml/ethos-u/ethos-u.git/about/) `dependencies` folder.
+
+- `ETHOS_U55_DRIVER_SRC_PATH`: Path to the Ethos-U55 core driver sources.
+    The default value points to the core_driver submodule in the
+    [ethos-u](https://git.mlplatform.org/ml/ethos-u/ethos-u.git/about/) `dependencies` folder.
+
+- `CMSIS_SRC_PATH`: Path to the CMSIS sources to be used to build the TensorFlow
+    Lite Micro library. This parameter is optional and valid only for
+    Arm® Cortex®-M CPU targeted configurations. The default value points to the CMSIS submodule in the
+    [ethos-u](https://git.mlplatform.org/ml/ethos-u/ethos-u.git/about/) `dependencies` folder.
+
+- `ETHOS_U55_ENABLED`: Sets whether the use of Ethos-U55 is available for
+    the deployment target. By default, this is set and therefore the
+    application is built with Ethos-U55 support.
+
+- `CPU_PROFILE_ENABLED`: Sets whether profiling information for the CPU
+    core should be displayed. By default, this is set to false, but can
+    be turned on for FPGA targets. On the FVP, the CPU core's cycle
+    counts are not meaningful and should not be used.
+
+- `LOG_LEVEL`: Sets the verbosity level for the application's output
+    over UART/stdout. Valid values are `LOG_LEVEL_TRACE`, `LOG_LEVEL_DEBUG`,
+    `LOG_LEVEL_INFO`, `LOG_LEVEL_WARN` and `LOG_LEVEL_ERROR`. By default, it
+    is set to `LOG_LEVEL_INFO`.
+
+- `<use_case>_MODEL_TFLITE_PATH`: Path to the model file that will be
+    processed and included into the application axf file. The default
+    value points to one of the delivered set of models. Make sure the
+    model chosen is aligned with the `ETHOS_U55_ENABLED` setting.
+
+  - When using the Ethos-U55 backend, the NN model is assumed to have been
+    optimized by the Vela compiler.
+    However, even if it has not been, it will fall back on the CPU and execute,
+    if supported by TensorFlow Lite Micro.
+
+  - When use of the Ethos-U55 is disabled and a Vela optimized model
+    is provided, the application will report a failure at runtime.
+
+- `USE_CASE_BUILD`: Specifies the list of applications to build. By
+    default, the build system scans sources to identify available ML
+    applications and produces executables for all detected use-cases.
+    This parameter can accept a single value, for example
+    `USE_CASE_BUILD=img_class`, or multiple values, for example
+    `USE_CASE_BUILD="img_class;kws"`.
+
+- `ETHOS_U55_TIMING_ADAPTER_SRC_PATH`: Path to timing adapter sources.
+    The default value points to the `timing_adapter` dependencies folder.
+
+- `TA_CONFIG_FILE`: Path to the CMake configuration file containing the
+    timing adapter parameters. Used only if the timing adapter build is
+    enabled.
+
+- `TENSORFLOW_LITE_MICRO_CLEAN_BUILD`: Optional parameter to enable/disable
+    "cleaning" prior to building for the TensorFlow Lite Micro library.
+    It is enabled by default.
+
+- `TENSORFLOW_LITE_MICRO_CLEAN_DOWNLOADS`: Optional parameter to enable wiping
+    out TPIP downloads from TensorFlow source tree prior to each build.
+    It is disabled by default.
+
+- `ARMCLANG_DEBUG_DWARF_LEVEL`: When the CMake build type is specified as `Debug`
+    and when armclang toolchain is being used to build for a Cortex-M CPU target,
+    this optional argument can be set to specify the DWARF format.
+    By default, this is set to 4 and is synonymous with passing `-g`
+    flag to the compiler. This is compatible with Arm-DS and other tools
+    which can interpret the latest DWARF format. To allow debugging using
+    the Model Debugger from Arm FastModel Tools Suite, this argument can be used
+    to pass DWARF format version as "3". Note: this option is only available
+    when CMake project is configured with `-DCMAKE_BUILD_TYPE=Debug` argument.
+    Also, the same DWARF format is used for building the TensorFlow Lite Micro library.
+
+> **Note:** For details on the specific use case build options, follow the
+> instructions in the use-case specific documentation.
+> Also, when setting any of the CMake configuration parameters that expect a directory/file path, it is advised
+> to **use absolute paths instead of relative paths**. An example configuration combining several of the options
+> above is shown below.
+
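+For example, a configuration that combines several of the options above (the values shown are purely illustrative and
+assume the command is run from a build directory, as described in the next section) could look like:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD="img_class;kws" \
+    -DLOG_LEVEL=LOG_LEVEL_DEBUG ..
+```
+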
+## Build process
+
+The build process can be summarized in three major steps:
+
+- Prepare the build environment by downloading third party sources required, see
+[Preparing build environment](#preparing-build-environment).
+
+- Configure the build for the platform chosen.
+This stage includes:
+  - CMake options configuration
+  - When `<use_case>_MODEL_TFLITE_PATH` build options aren't provided, default neural network models are downloaded
+from [Arm ML-Zoo](https://github.com/ARM-software/ML-zoo/). In the case of a native build, the network's input and output data
+for tests are downloaded.
+  - Some files such as neural network models, network's inputs and output labels are automatically converted
+    into C/C++ arrays, see [Automatic file generation](#automatic-file-generation).
+
+- Build the application.\
+During this stage the application and third party libraries are built, see [Building the configured project](#building-the-configured-project).
+
+### Preparing build environment
+
+Certain third party sources are required to be present on the development machine so that the example sources in this
+repository can link against them:
+
+1. [TensorFlow Lite Micro repository](https://github.com/tensorflow/tensorflow)
+2. [Ethos-U55 core driver repository](https://review.mlplatform.org/admin/repos/ml/ethos-u/ethos-u-core-driver)
+3. [CMSIS-5](https://github.com/ARM-software/CMSIS_5.git)
+
+These are part of the [ethos-u repository](https://git.mlplatform.org/ml/ethos-u/ethos-u.git/about/) and set as
+submodules of this project.
+
+To pull the submodules:
+
+```sh
+git submodule update --init
+```
+
+This will download all the required components and place them in a tree like:
+
+```tree
+dependencies
+ └── ethos-u
+     ├── cmsis
+     ├── core_driver
+     ├── tensorflow
+     └── ...
+```
+
+> **NOTE**: The default source paths for the TPIP sources assume the above directory structure, but all of the relevant
+>paths can be overridden by CMake configuration arguments `TENSORFLOW_SRC_PATH`, `ETHOS_U55_DRIVER_SRC_PATH`,
+>and `CMSIS_SRC_PATH`.
+
+### Create a build directory
+
+Create a build directory in the root of the project and navigate inside:
+
+```commandline
+mkdir build && cd build
+```
+
+### Configuring the build for `MPS3: SSE-300`
+
+On Linux, execute the following command to build the application to run
+on the Ethos-U55 when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+Toolchain option `CMAKE_TOOLCHAIN_FILE` points to the toolchain specific
+file to set the compiler and platform specific parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Model Debugger, for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 ..
+```
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver and CMSIS are not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` and `CMSIS_SRC_PATH` can be used to configure their location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DCMSIS_SRC_PATH=/my/custom/location/cmsis ..
+```
+
+> **Note:** If re-building with changed parameter values, it is
+highly advised to clean the build directory and re-run the CMake command.
+
+### Configuring the build for `MPS3: SSE-200`
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-200 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-200 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -G "MinGW Makefiles ..
+```
+
+### Configuring the build native unit-test
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=native \
+    -DCMAKE_TOOLCHAIN_FILE=public/scripts/cmake/native-toolchain.cmake ..
+```
+
+For Windows add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=native \
+    -DCMAKE_TOOLCHAIN_FILE=public/scripts/cmake/native-toolchain.cmake \
+    -G "MinGW Makefiles ..
+```
+
+Results of the build will be placed under `build/bin/` folder:
+
+```tree
+ bin
+  ├── dev_ethosu_eval-tests
+  └── ethos-u
+```
+
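+The generated test executable can then typically be run directly on the build machine, for example:
+
+```commandline
+./bin/dev_ethosu_eval-tests
+```
+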
+### Configuring the build for `simple_platform`
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=simple_platform \
+    -DCMAKE_TOOLCHAIN_FILE=public/scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=simple_platform \
+    -DCMAKE_TOOLCHAIN_FILE=public/scripts/cmake/bare-metal-toolchain.cmake \
+    -G "MinGW Makefiles" ..
+```
+
+### Building the configured project
+
+If the CMake command succeeds, build the application as follows:
+
+```commandline
+make -j4
+```
+
+or for Windows:
+
+```commandline
+mingw32-make -j4
+```
+
+Add `VERBOSE=1` to see compilation and link details.
+
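+For example, to rebuild with full command echoing:
+
+```commandline
+make -j4 VERBOSE=1
+```
+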
+Results of the build will be placed under `build/bin` folder, an
+example:
+
+```tree
+bin
+ ├── ethos-u-<use_case_name>.axf
+ ├── ethos-u-<use_case_name>.htm
+ ├── ethos-u-<use_case_name>.map
+ ├── images-<use_case_name>.txt
+ └── sectors
+        └── <use_case>
+                ├── dram.bin
+                └── itcm.bin
+```
+
+Where, for each implemented use-case under the `source/use_case` directory,
+the following build artefacts will be created:
+
+- `ethos-u-<use case name>.axf`: The built application binary for a ML
+    use case.
+
+- `ethos-u-<use case name>.map`: Information from building the
+    application (e.g. libraries used, what was optimized, location of
+    objects).
+
+- `ethos-u-<use case name>.htm`: Human readable file containing the
+    call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files
+    for loading into different FPGA memory regions.
+
+- `images-<use case name>.txt`: Tells the FPGA which memory regions to
+    use for loading the binaries in the `sectors/` folder.
+
+> **Note:** For the specific use case commands, see the relevant section
+in the use case documentation.
+
+## Building timing adapter with custom options
+
+The sources also contain the configuration for a timing adapter utility
+for the Ethos-U55 driver. The timing adapter allows the platform to simulate user-provided
+memory bandwidth and latency constraints.
+
+The timing adapter driver aims to control the behavior of the two AXI buses
+used by the Ethos-U55. One is for the SRAM memory region and the other is for
+flash or DRAM. The SRAM is where intermediate buffers are expected to be
+allocated and, therefore, this region can serve frequent R/W traffic
+generated by computation operations while executing a neural network
+inference. The flash or DDR is where we expect to store the model
+weights and, therefore, this bus would typically be used only for R/O
+traffic.
+
+It is used for the MPS3 FPGA as well as for the Fast Model environment.
+
+The CMake build framework allows the behavior of each bus to be controlled
+with the following parameters:
+
+- `MAXR`: Maximum number of pending read operations allowed. 0 is
+    inferred as infinite, and the default value is 4.
+
+- `MAXW`: Maximum number of pending write operations allowed. 0 is
+    inferred as infinite, and the default value is 4.
+
+- `MAXRW`: Maximum number of pending read+write operations allowed. 0 is
+    inferred as infinite, and the default value is 8.
+
+- `RLATENCY`: Minimum latency, in cycle counts, for a read operation.
+    This is the duration between ARVALID and RVALID signals. The default
+    value is 50.
+
+- `WLATENCY`: Minimum latency, in cycle counts, for a write operation.
+    This is the duration between WVALID + WLAST and BVALID being
+    de-asserted. The default value is 50.
+
+- `PULSE_ON`: Number of cycles during which addresses are let through.
+    The default value is 5100.
+
+- `PULSE_OFF`: Number of cycles during which addresses are blocked. The
+    default value is 5100.
+
+- `BWCAP`: Maximum number of 64-bit words transferred per pulse cycle. A
+    pulse cycle is PULSE_ON + PULSE_OFF. 0 is inferred as infinite, and
+    the default value is 625.
+
+- `MODE`: Timing adapter operation mode. The default value is 0:
+
+  - Bit 0: 0=simple; 1=latency-deadline QoS throttling of read vs.
+        write
+
+  - Bit 1: 1=enable random AR reordering (0=default),
+
+  - Bit 2: 1=enable random R reordering (0=default),
+
+  - Bit 3: 1=enable random B reordering (0=default)
+
+For the timing adapter's CMake build configuration, the SRAM AXI bus is assigned
+index 0 and the flash/DRAM AXI bus has index 1. To change a bus
+parameter for the build, a `TA<index>_` prefix should be added
+to the parameter name above. For example, `TA0_MAXR=10` will set the SRAM AXI bus's
+maximum pending reads to 10.
+
+As an example, if we have the following parameters for flash/DRAM
+region:
+
+- `TA1_MAXR` = "2"
+
+- `TA1_MAXW` = "0"
+
+- `TA1_MAXRW` = "0"
+
+- `TA1_RLATENCY` = "64"
+
+- `TA1_WLATENCY` = "32"
+
+- `TA1_PULSE_ON` = "320"
+
+- `TA1_PULSE_OFF` = "80"
+
+- `TA1_BWCAP` = "50"
+
+For a clock rate of 500MHz, this would translate to:
+
+- The maximum duty cycle for any operation is:\
+![Maximum duty cycle formula](../media/F1.png)
+
+- Maximum bit rate for this bus (64-bit wide) is:\
+![Maximum bit rate formula](../media/F2.png)
+
+- With a read latency of 64 cycles, and maximum pending reads as 2,
+    each read could be a maximum of 64 or 128 bytes, as defined by the
+    Ethos-U55's AXI bus attributes.
+
+    The bandwidth is calculated solely by read parameters ![Bandwidth formula](
+        ../media/F3.png)
+
+    This is higher than the overall bandwidth dictated by the bus parameters
+    of \
+    ![Overall bandwidth formula](../media/F4.png)
+
+This suggests that the read operation is limited only by the overall bus
+bandwidth.
+
+The timing adapter requires recompilation to change parameters. The default timing
+adapter configuration file, pointed to by the `TA_CONFIG_FILE` build parameter, is
+located in the `scripts/cmake` folder and contains all options for AXI0 and
+AXI1 described above.
+
+An example of `scripts/cmake/ta_config.cmake`:
+
+```cmake
+# Timing adapter options
+set(TA_INTERACTIVE OFF)
+
+# Timing adapter settings for AXI0
+set(TA0_MAXR "8")
+set(TA0_MAXW "8")
+set(TA0_MAXRW "0")
+set(TA0_RLATENCY "32")
+set(TA0_WLATENCY "32")
+set(TA0_PULSE_ON "3999")
+set(TA0_PULSE_OFF "1")
+set(TA0_BWCAP "4000")
+...
+```
+
+An example of the build with custom timing adapter configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTA_CONFIG_FILE=scripts/cmake/my_ta_config.cmake ..
+```
+
+## Add custom inputs
+
+The application performs inference on input data found in the folder set
+by the CMake parameters; for more information, see section 3.3 in the
+specific use case documentation.
+
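+As an illustrative sketch (the folder path below is hypothetical), the image classification use case can be pointed
+at a folder of custom samples through its `img_class_FILE_PATH` parameter:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -Dimg_class_FILE_PATH=/path/to/custom/samples ..
+```
+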
+## Add custom model
+
+The application performs inference using the model pointed to by the
+CMake parameter `MODEL_TFLITE_PATH`.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom
+model has been run through the Vela compiler successfully before continuing.
+
+To run the application with a custom model, you will need to provide a
+`labels_<model_name>.txt` file of labels associated with the model.
+Each line of the file should correspond to one of the outputs in your
+model. See the provided `labels_mobilenet_v2_1.0_224.txt` file in the
+`img_class` use case for an example.
+
+Then, you must set `<use_case>_MODEL_TFLITE_PATH` to the location of
+the Vela processed model file and `<use_case>_LABELS_TXT_FILE` to the
+location of the associated labels file:
+
+```commandline
+cmake \
+    -D<use_case>_MODEL_TFLITE_PATH=<path/to/custom_model_after_vela.tflite> \
+    -D<use_case>_LABELS_TXT_FILE=<path/to/labels_custom_model.txt> \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+> **Note:** For the specific use case command see the relative section in the use case documentation.
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The TensorFlow Lite for Microcontrollers model pointed to by `<use_case>_MODEL_TFLITE_PATH` and the
+labels text file pointed to by `<use_case>_LABELS_TXT_FILE` will be
+converted to C++ files during the CMake configuration stage and then
+compiled into the application for performing inference.
+
+The log from the configuration stage should tell you what model path and
+labels file have been used:
+
+```log
+-- User option TARGET_PLATFORM is set to mps3
+-- User option <use_case>_MODEL_TFLITE_PATH is set to
+<path/to/custom_model_after_vela.tflite>
+...
+-- User option <use_case>_LABELS_TXT_FILE is set to
+<path/to/labels_custom_model.txt>
+...
+-- Using <path/to/custom_model_after_vela.tflite>
+++ Converting custom_model_after_vela.tflite to custom_model_after_vela.tflite.cc
+-- Generating labels file from <path/to/labels_custom_model.txt>
+-- writing to <path/to/build>/generated/include/Labels.hpp and <path/to/build>/generated/src/Labels.cc
+...
+```
+
+After compiling, your custom model will have replaced the default
+one in the application.
+
+## Optimize custom model with Vela compiler
+
+> **Note:** This tool is not available within this project.
+It is a Python tool available from <https://pypi.org/project/ethos-u-vela/>.
+The source code is hosted on <https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/>.
+
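+For example, it can be installed into a Python virtual environment with `pip`, as also shown in the
+[quick start guide](../quick_start.md):
+
+```commandline
+python3 -m venv env
+source ./env/bin/activate
+pip install --upgrade pip
+pip install ethos-u-vela
+```
+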
+The Vela compiler is a tool that can optimize a neural network model
+into a version that can run on an embedded system containing Ethos-U55.
+
+The optimized model will contain custom operators for sub-graphs of the
+model that can be accelerated by the Ethos-U55. The remaining layers that
+cannot be accelerated are left unchanged and will run on the CPU using
+optimized (CMSIS-NN) or reference kernels provided by the inference
+engine.
+
+After the compilation, the optimized model can only be executed on a
+system with Ethos-U55.
+
+> **Note:** The NN model provided during the build and compiled into the application
+executable binary defines whether CPU or NPU is used to execute workloads.
+If an unoptimized model is used, then inference will run on the Cortex-M CPU.
+
+The Vela compiler accepts parameters to influence the model optimization. The
+model provided within this project has been optimized with
+the following parameters:
+
+```commandline
+vela \
+    --accelerator-config=ethos-u55-128 \
+    --block-config-limit=0 \
+    --config my_vela_cfg.ini \
+    --memory-mode Shared_Sram \
+    --system-config Ethos_U55_High_End_Embedded \
+    <model>.tflite
+```
+
+Where:
+
+- `--accelerator-config`: Specifies the accelerator configuration to use;
+    one of `ethos-u55-256`, `ethos-u55-128`, `ethos-u55-64` or `ethos-u55-32`.
+- `--block-config-limit`: Limit block config search space, use zero for
+    unlimited.
+- `--config`: Specifies the path to the Vela configuration file. The format of the file is a Python ConfigParser `.ini` file.
+    An example can be found in the `scripts/vela` folder: [vela.ini](../../scripts/vela/vela.ini).
+- `--memory-mode`: Selects the memory mode to use as specified in the Vela configuration file.
+- `--system-config`: Selects the system configuration to use as specified in the Vela configuration file.
+
+The Vela compiler accepts a `.tflite` file as input and saves the optimized network
+model as a `.tflite` file.
+
+Using `--show-cpu-operations` and `--show-subgraph-io-summary` will show
+all the operations that fall back to the CPU and a summary of all the
+subgraphs and their inputs and outputs.
+
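+A sketch of such an inspection (the model file name is illustrative):
+
+```commandline
+vela ds_cnn_clustered_int8.tflite \
+    --accelerator-config=ethos-u55-128 \
+    --show-cpu-operations \
+    --show-subgraph-io-summary
+```
+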
+To see the Vela help for all the parameters, use: `vela --help`.
+
+Please get in touch with your Arm representative to request access to
+Vela Compiler documentation for more details.
+
+> **Note:** By default, use of the Ethos-U55 is enabled in the CMake configuration.
+This can be changed by setting the `ETHOS_U55_ENABLED` build option to a false value, as sketched below.
+
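+For example, a sketch of a configuration with the NPU disabled (assuming `OFF` is accepted as a false value for this
+option):
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DETHOS_U55_ENABLED=OFF ..
+```
+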
+## Memory constraints
+
+Both the MPS3 Fixed Virtual Platform and the MPS3 FPGA platform share
+the linker script (scatter file) for the SSE-300 design. The design is set
+by the CMake configuration parameter `TARGET_SUBSYSTEM` as described in
+the previous section.
+
+The memory map exposed by this design is presented in Appendix 1. This
+can be used as a reference when editing the scatter file, especially to
+make sure that region boundaries are respected. The snippet from MPS3's
+scatter file is presented below:
+
+```
+;---------------------------------------------------------
+; First load region
+;---------------------------------------------------------
+LOAD_REGION_0 0x00000000 0x00080000
+{
+    ;-----------------------------------------------------
+    ; First part of code mem -- 512kiB
+    ;-----------------------------------------------------
+    itcm.bin 0x00000000 0x00080000
+    {
+        *.o (RESET, +First)
+        * (InRoot$$Sections)
+        .ANY (+RO)
+    }
+
+    ;-----------------------------------------------------
+    ; 128kiB of 512kiB bank is used for any other RW or ZI
+    ; data. Note: this region is internal to the Cortex-M CPU
+    ;-----------------------------------------------------
+    dtcm.bin 0x20000000 0x00020000
+    {
+        .ANY(+RW +ZI)
+    }
+
+    ;-----------------------------------------------------
+    ; 128kiB of stack space within the DTCM region
+    ;-----------------------------------------------------
+    ARM_LIB_STACK 0x20020000 EMPTY ALIGN 8 0x00020000
+    {}
+
+    ;-----------------------------------------------------
+    ; 256kiB of heap space within the DTCM region
+    ;-----------------------------------------------------
+
+    ARM_LIB_HEAP 0x20040000 EMPTY ALIGN 8 0x00040000
+    {}
+
+    ;-----------------------------------------------------
+    ; SSE-300's internal SRAM
+    ;-----------------------------------------------------
+    isram.bin 0x21000000 UNINIT ALIGN 16 0x00080000
+    {
+        ; activation buffers a.k.a tensor arena
+        *.o (.bss.NoInit.activation_buf)
+    }
+}
+
+;---------------------------------------------------------
+; Second load region
+;---------------------------------------------------------
+LOAD_REGION_1 0x60000000 0x02000000
+{
+    ;-----------------------------------------------------
+    ; 32 MiB of DRAM space for nn model and input vectors
+    ;-----------------------------------------------------
+    dram.bin 0x60000000 ALIGN 16 0x02000000
+    {
+        ; nn model's baked in input matrices
+        *.o (ifm)
+
+        ; nn model
+        *.o (nn_model)
+
+        ; if the activation buffer (tensor arena) doesn't
+        ; fit in the SRAM region, we accommodate it here
+        *.o (activation_buf)
+    }
+}
+```
+
+It is worth noting that in the bitfile implementation, only the BRAM,
+internal SRAM and DDR memory regions are accessible to the Ethos-U55
+block. In the above snippet, the internal SRAM region memory can be seen
+to be utilized by activation buffers with a limit of 512 kiB. If used,
+this region will be written to by the Ethos-U55 block frequently. A bigger
+region of memory for storing the model is placed in the DDR region,
+under LOAD_REGION_1. The two load regions are necessary because the MPS3
+motherboard configuration controller limits the load size at address
+0x00000000 to 512 kiB. This has implications for how the application is
+deployed on MPS3, as explained in section 3.8.3.
+
+## Automatic file generation
+
+As mentioned in the previous sections, some files such as neural network
+models, network's inputs, and output labels are automatically converted
+into C/C++ arrays during the CMake project configuration stage.
+Additionally, some code is generated to allow access to these arrays.
+
+An example:
+
+```log
+-- Building use-cases: img_class.
+-- Found sources for use-case img_class
+-- User option img_class_FILE_PATH is set to /tmp/samples
+-- User option img_class_IMAGE_SIZE is set to 224
+-- User option img_class_LABELS_TXT_FILE is set to /tmp/labels/labels_model.txt
+-- Generating image files from /tmp/samples
+++ Converting cat.bmp to cat.cc
+++ Converting dog.bmp to dog.cc
+-- Skipping file /tmp/samples/files.md due to unsupported image format.
+++ Converting kimono.bmp to kimono.cc
+++ Converting tiger.bmp to tiger.cc
+++ Generating /tmp/build/generated/img_class/include/InputFiles.hpp
+-- Generating labels file from /tmp/labels/labels_model.txt
+-- writing to /tmp/build/generated/img_class/include/Labels.hpp and /tmp/build/generated/img_class/src/Labels.cc
+-- User option img_class_ACTIVATION_BUF_SZ is set to 0x00200000
+-- User option img_class_MODEL_TFLITE_PATH is set to /tmp/models/model.tflite
+-- Using /tmp/models/model.tflite
+++ Converting model.tflite to    model.tflite.cc
+...
+```
+
+In particular, the build options pointing to the input files (`<use_case>_FILE_PATH`),
+the model (`<use_case>_MODEL_TFLITE_PATH`) and the labels text file (`<use_case>_LABELS_TXT_FILE`)
+are used by Python scripts to generate not only the converted array files,
+but also some headers with utility functions.
+
+For example, the generated utility functions for image classification are:
+
+- `build/generated/include/InputFiles.hpp`
+
+```c++
+#ifndef GENERATED_IMAGES_H
+#define GENERATED_IMAGES_H
+
+#include <cstdint>
+
+#define NUMBER_OF_FILES  (2U)
+#define IMAGE_DATA_SIZE  (150528U)
+
+extern const uint8_t im0[IMAGE_DATA_SIZE];
+extern const uint8_t im1[IMAGE_DATA_SIZE];
+
+const char* get_filename(const uint32_t idx);
+const uint8_t* get_img_array(const uint32_t idx);
+
+#endif /* GENERATED_IMAGES_H */
+```
+
+- `build/generated/src/InputFiles.cc`
+
+```c++
+#include "InputFiles.hpp"
+
+static const char *img_filenames[] = {
+    "img1.bmp",
+    "img2.bmp",
+};
+
+static const uint8_t *img_arrays[] = {
+    im0,
+    im1
+};
+
+const char* get_filename(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return img_filenames[idx];
+    }
+    return nullptr;
+}
+
+const uint8_t* get_img_array(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return img_arrays[idx];
+    }
+    return nullptr;
+}
+```
+
+These headers are generated using Python templates located in `scripts/py/templates/*.template`:
+
+```tree
+scripts/
+├── cmake
+│   ├── ...
+│   ├── subsystem-profiles
+│   │   ├── corstone-sse-200.cmake
+│   │   └── corstone-sse-300.cmake
+│   ├── templates
+│   │   ├── mem_regions.h.template
+│   │   ├── peripheral_irqs.h.template
+│   │   └── peripheral_memmap.h.template
+│   └── ...
+└── py
+    ├── <generation scripts>
+    ├── requirements.txt
+    └── templates
+        ├── audio.cc.template
+        ├── AudioClips.cc.template
+        ├── AudioClips.hpp.template
+        ├── default.hpp.template
+        ├── header_template.txt
+        ├── image.cc.template
+        ├── Images.cc.template
+        ├── Images.hpp.template
+        ├── Labels.cc.template
+        ├── Labels.hpp.template
+        ├── testdata.cc.template
+        ├── TestData.cc.template
+        ├── TestData.hpp.template
+        └── tflite.cc.template
+```
+
+Based on the type of use case, the correct conversion is called in the use case CMake file
+(audio or image respectively for voice or vision use cases).
+For example, the generation calls for image classification (`source/use_case/img_class/usecase.cmake`):
+
+```cmake
+# Generate input files
+generate_images_code("${${use_case}_FILE_PATH}"
+                     ${SRC_GEN_DIR}
+                     ${INC_GEN_DIR}
+                     "${${use_case}_IMAGE_SIZE}")
+
+# Generate labels file
+set(${use_case}_LABELS_CPP_FILE Labels)
+generate_labels_code(
+    INPUT           "${${use_case}_LABELS_TXT_FILE}"
+    DESTINATION_SRC ${SRC_GEN_DIR}
+    DESTINATION_HDR ${INC_GEN_DIR}
+    OUTPUT_FILENAME "${${use_case}_LABELS_CPP_FILE}"
+)
+
+...
+
+# Generate model file
+generate_tflite_code(
+    MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+    DESTINATION ${SRC_GEN_DIR}
+)
+```
+
+> **Note:** When required, for models and labels conversion it's possible to add extra parameters such
+> as extra code to put in `<model>.cc` file or namespaces.
+>
+> ```cmake
+> set(${use_case}_LABELS_CPP_FILE Labels)
+> generate_labels_code(
+>     INPUT           "${${use_case}_LABELS_TXT_FILE}"
+>     DESTINATION_SRC ${SRC_GEN_DIR}
+>     DESTINATION_HDR ${INC_GEN_DIR}
+>     OUTPUT_FILENAME "${${use_case}_LABELS_CPP_FILE}"
+>     NAMESPACE       "namespace1" "namespace2"
+> )
+>
+> ...
+>
+> set(EXTRA_MODEL_CODE
+>     "/* Model parameters for ${use_case} */"
+>     "extern const int   g_myvariable2     = value1"
+>     "extern const int   g_myvariable2     = value2"
+> )
+>
+> generate_tflite_code(
+>     MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+>     DESTINATION ${SRC_GEN_DIR}
+>     EXPRESSIONS ${EXTRA_MODEL_CODE}
+>     NAMESPACE   "namespace1" "namespace2"
+> )
+> ```
+
+In addition to input file conversions, the correct platform/system profile is selected
+(in `scripts/cmake/subsystem-profiles/*.cmake`) based on the `TARGET_SUBSYSTEM` build option,
+and the variables set there are used to generate memory region sizes, base addresses and IRQ numbers,
+used respectively to generate the mem_regions.h, peripheral_irqs.h and peripheral_memmap.h headers.
+Templates from `scripts/cmake/templates/*.template` are used to generate the header files.
+
+After the build, the files generated in the build folder are:
+
+```tree
+build/generated/
+├── bsp
+│   ├── mem_regions.h
+│   ├── peripheral_irqs.h
+│   └── peripheral_memmap.h
+├── <use_case_name1>
+│   ├── include
+│   │   ├── InputFiles.hpp
+│   │   └── Labels.hpp
+│   └── src
+│       ├── <uc1_input_file1>.cc
+│       ├── <uc1_input_file2>.cc
+│       ├── InputFiles.cc
+│       ├── Labels.cc
+│       └── <uc1_model_name>.tflite.cc
+└──  <use_case_name2>
+    ├── include
+    │   ├── InputFiles.hpp
+    │   └── Labels.hpp
+    └── src
+        ├── <uc2_input_file1>.cc
+        ├── <uc2_input_file2>.cc
+        ├── InputFiles.cc
+        ├── Labels.cc
+        └── <uc2_model_name>.tflite.cc
+```
+
+Next section of the documentation: [Deployment](../documentation.md#Deployment).
diff --git a/docs/sections/coding_guidelines.md b/docs/sections/coding_guidelines.md
new file mode 100644
index 0000000..f1813d3
--- /dev/null
+++ b/docs/sections/coding_guidelines.md
@@ -0,0 +1,323 @@
+# Coding standards and guidelines
+
+## Contents
+
+- [Introduction](#introduction)
+- [Language version](#language-version)
+- [File naming](#file-naming)
+- [File layout](#file-layout)
+- [Block Management](#block-management)
+- [Naming Conventions](#naming-conventions)
+  - [C++ language naming conventions](#c_language-naming-conventions)
+  - [C language naming conventions](#c-language-naming-conventions)
+- [Layout and formatting conventions](#layout-and-formatting-conventions)
+- [Language usage](#language-usage)
+
+## Introduction
+
+This document presents some standard coding guidelines to be followed for contributions to this repository. Most of the
+code is written in C++, but there is some written in C as well. There is a clear C/C++ boundary at the Hardware
+Abstraction Layer (HAL). Both these languages follow different naming conventions within this repository, by design, to:
+
+- have clearly distinguishable C and C++ sources.
+- make cross language function calls stand out. Mostly these will be C++ function calls to the HAL functions written in C.
+However, because we also issue function calls to third party APIs (and they may not follow these conventions), the
+intended outcome may not be fully realized in all cases.
+
+## Language version
+
+For this project, code written in C++ shall use the C++11 language standard and
+may use a subset of the C++11 feature set. Code written in C should be compatible
+with the C99 standard.
+
+Software components written in C/C++ may use the language features allowed and encouraged by this documentation.
+
+## File naming
+
+- C files should have the `.c` extension.
+- C++ files should have the `.cc` or `.cpp` extension.
+- Header files for functions implemented in C should have the `.h` extension.
+- Header files for functions implemented in C++ should have the `.hpp` extension.
+
+## File layout
+
+- Standard copyright notice must be included in all files:
+
+  ```copyright
+  /*
+  * Copyright (c) <years additions were made to project> <your name>, Arm Limited. All rights reserved.
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+  ```
+
+- Source lines must be no longer than 120 characters. Prefer to spread code out vertically rather than horizontally,
+  wherever it makes sense:
+
+  ```C++
+  // This is significantly easier to read
+  enum class SomeEnum1
+  {
+      ENUM_VALUE_1,
+      ENUM_VALUE_2,
+      ENUM_VALUE_3
+  };
+
+  // than this
+  enum class SomeEnum2 { ENUM_VALUE_1, ENUM_VALUE_2, ENUM_VALUE_3 };
+  ```
+
+- Block indentation should use 4 characters, no tabs.
+
+- Each statement must be on a separate line.
+
+  ```C++
+  int a, b; // Error prone
+  int c, *d;
+
+  int e = 0; // GOOD
+  int *p = nullptr; // GOOD
+  ```
+
+- Source must not contain commented out code or unreachable code
+
+## Block Management
+
+- Blocks must use braces and braces location must be consistent.
+  - Each function has its opening brace on the next line, at the same indentation level as its header; the code within
+  the braces is indented and the closing brace at the end is at the same level as the opening one.
+  For compactness, if the class/function body is empty, braces on the same line are accepted.
+
+  - Conditional statements and loops, even if they have just a single-statement body, need to be surrounded by braces; the
+opening brace is on the same line, the closing brace is on the next line at the same indentation level as its header;
+the same rule is applied to classes.
+
+    ```C++
+    class Class1 {
+    public:
+        Class1();
+    private:
+        int element;
+    };
+
+    void NotEmptyFunction()
+    {
+        if (condition) {
+            // [...]
+        } else {
+            // [...]
+        }
+        // [...]
+        for(start_cond; end_cond; step_cond) {
+            // [...]
+        }
+    }
+
+    void EmptyFunction() {}
+    ```
+
+  - Cases within a switch are indented and enclosed in braces:
+
+    ```C++
+    switch (option)
+    {
+        case 1:
+        {
+            // handle option 1
+            break;
+        }
+        case 2:
+        {
+            // handle option 2
+            break;
+        }
+        default:
+        {
+            break;
+        }
+    }
+    ```
+
+## Naming Conventions
+
+### C++ language naming conventions
+
+- Type (class, struct, enum) and function names must be `PascalCase`:
+
+  ```C++
+  class SomeClass
+  {
+      // [...]
+  };
+  void SomeFunction()
+  {
+      // [...]
+  }
+  ```
+
+- Variables and parameter names must be `camelCase`:
+
+  ```C++
+  int someVariable;
+
+  void SomeFunction(int someParameter) {}
+  ```
+
+- Macros, pre-processor definitions, and enumeration values should use upper case names:
+
+  ```C++
+  #define SOME_DEFINE
+
+  enum class SomeEnum
+  {
+      ENUM_VALUE_1,
+      ENUM_VALUE_2
+  };
+  ```
+
+- Namespace names must be lower case
+
+  ```C++
+  namespace nspace
+  {
+  void FunctionInNamespace();
+  };
+  ```
+
+- Source code should use Hungarian notation to annotate the name of a variable with information about its meaning.
+
+  | Prefix | Class | Description |
+  | ------ | ----- | ----------- |
+  | p | Type      | Pointer to any other type |
+  | k | Qualifier | Constant |
+  | v | Qualifier | Volatile |
+  | m | Scope     | Member of a class or struct |
+  | s | Scope     | Static |
+  | g | Scope     | Used to indicate variable has scope beyond the current function: file-scope or externally visible scope|
+
+The following examples of Hungarian notation are one possible set of uses:
+
+  ```C++
+  int g_GlobalInt=123;
+  char* m_pNameOfMemberPointer=nullptr;
+  const float g_kSomeGlobalConstant = 1.234f;
+  static float ms_MyStaticMember =  4.321f;
+  bool myLocalVariable=true;
+  ```
+
+### C language naming conventions
+
+For C sources, we follow the Linux variant of the K&R style wherever possible.
+
+- For function and variable names we use `snake_case` convention:
+
+  ```C
+  int some_variable;
+
+  void some_function(int some_parameter) {}
+  ```
+
+- Macros, pre-processor definitions, and enumeration values should use upper case names:
+
+  ```C
+  #define SOME_DEFINE
+
+  enum some_enum
+  {
+      ENUM_VALUE_1,
+      ENUM_VALUE_2
+  };
+  ```
+
+## Layout and formatting conventions
+
+- C++ class code layout:
+  Public function definitions should be at the top of a class definition, since they are the things most likely to be used
+by other people.
+  Private functions and member variables should be last.
+  Class functions and member variables should be laid out logically in blocks of related functionality.
+
+- Class access specifiers are not indented.
+
+  ```C++
+  class MyClass
+  {
+  public:
+    int m_PublicMember;
+  protected:
+    int m_ProtectedMember;
+  private:
+    int m_PrivateMember;
+  };
+  ```
+
+- Don't leave trailing spaces at the end of lines.
+
+- Empty lines should have no trailing spaces.
+
+- For pointers and references, the symbols `*` and `&` should be adjacent to the name of the type, not the name
+  of the variable.
+
+  ```C++
+  char* someText = "abc";
+
+  void SomeFunction(const SomeObject& someObject) {}
+  ```
+
+## Language usage
+
+- Header `#include` statements should be minimized.
+  Inclusion of unnecessary headers slows down compilation, and can hide errors where a function calls a
+  subroutine which it should not be using if the unnecessary header defining this subroutine is included.
+
+  Header statements should be included in the following order:
+
+  - Header file corresponding to the current source file (if applicable)
+  - Headers from the same component
+  - Headers from other components
+  - Third-party headers
+  - System headers
+
+  > **Note:** Leave one blank line between each of these groups for readability.
+  >Use quotes for headers from within the same project and angle brackets for third-party and system headers.
+  >Do not use paths relative to the current source file, such as `../Header.hpp`. Instead configure your include paths
+>in the project makefiles.
+
+  ```C++
+  #include "ExampleClass.hpp"     // Own header
+
+  #include "Header1.hpp"          // Header from same component
+  #include "Header1.hpp"          // Header from same component
+
+  #include "other/Header3.hpp"    // Header from other component
+
+  #include <ThirdParty.hpp>       // Third-party headers
+
+  #include <vector>               // System  header
+
+  // [...]
+  ```
+
+- C++ casts should use the template-styled cast syntax:
+
+  ```C++
+  int a = 100;
+  float b = (float)a; // Not OK
+  float c = static_cast<float>(a); // OK
+  ```
+
+- Use the `const` keyword to declare constants instead of `#define`.
+
+- Use `nullptr` instead of `NULL`.
+  C++11 introduced the `nullptr` type to distinguish null pointer constants from the integer 0.
diff --git a/docs/sections/customizing.md b/docs/sections/customizing.md
new file mode 100644
index 0000000..e92c327
--- /dev/null
+++ b/docs/sections/customizing.md
@@ -0,0 +1,731 @@
+# Implementing custom ML application
+
+- [Software project description](#software-project-description)
+- [HAL API](#hal-api)
+- [Main loop function](#main-loop-function)
+- [Application context](#application-context)
+- [Profiler](#profiler)
+- [NN Model API](#nn-model-api)
+- [Adding custom ML use case](#adding-custom-ml-use-case)
+- [Implementing main loop](#implementing-main-loop)
+- [Implementing custom NN model](#implementing-custom-nn-model)
+- [Executing inference](#executing-inference)
+- [Printing to console](#printing-to-console)
+- [Reading user input from console](#reading-user-input-from-console)
+- [Output to MPS3 LCD](#output-to-mps3-lcd)
+- [Building custom use case](#building-custom-use-case)
+
+This section describes how to implement a custom Machine Learning
+application running on the Fast Model FVP or on the Arm MPS3 FPGA prototyping board.
+
+The Arm® Ethos™-U55 code sample software project offers a simple way to incorporate
+additional use-case code into the existing infrastructure and provides a build
+system that automatically picks up added functionality and produces a corresponding
+executable for each use-case. This is achieved by following certain configuration
+and code implementation conventions.
+
+The following sign will indicate the important conventions to apply:
+
+> **Convention:** The code is developed using C++11 and C99 standards.
+This is governed by the TensorFlow Lite for Microcontrollers framework.
+
+## Software project description
+
+As mentioned in the [Repository structure](../documentation.md#repository-structure) section, project sources are:
+
+```tree
+├── docs
+│ ├── ...
+│ └── Documentation.md
+├── resources
+│ └── img_class
+│      └── ...
+├── scripts
+│ └── ...
+├── source
+│ ├── application
+│ │ ├── hal
+│ │ ├── main
+│ │ └── tensorflow-lite-micro
+│ └── use_case
+│     └──img_class
+├── CMakeLists.txt
+└── Readme.md
+```
+
+Where `source` contains C/C++ sources for the platform and ML applications.
+Common code related to the Ethos-U55 code samples software
+framework resides in the *application* sub-folder, while ML application specific logic (use-case)
+sources are in the *use_case* sub-folder.
+
+> **Convention**: Separate use-cases must be organized in sub-folders under the `use_case` folder.
+The name of the directory is used as the name for this use-case and can be provided
+as a `USE_CASE_BUILD` parameter value.
+The build system expects that sources for the use-case are structured as follows:
+headers in an `include` directory, C/C++ sources in a `src` directory.
+For example:
+>
+>```tree
+>use_case
+> └──img_class
+>       ├── include
+>       │   └── *.hpp
+>       └── src
+>           └── *.cc
+>```
+
+## HAL API
+
+The hardware abstraction layer is represented by the following interfaces.
+To access them, include the hal.h header.
+
+- *hal_platform* structure:\
+    Structure that defines a platform context to be used by the application.
+
+  |  Attribute name    | Description                                                                                                                                                         |
+  |--------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+  |  inited            |  Initialization flag. It is set after the platform_init() function is called.                                                                       |
+  |  plat_name         |  Platform name. It is set to "mps3-bare" for the MPS3 build and "FVP" for the Fast Model build.                                                     |
+  |  data_acq          |  Pointer to the data acquisition module responsible for user interaction and other data collection for the application logic.                       |
+  |  data_psn          |  Pointer to the data presentation module responsible for data output through components available on the selected platform: LCD for MPS3, console for Fast Model. |
+  |  timer             |  Pointer to the platform timer implementation (see platform_timer).                                                                                 |
+  |  platform_init     |  Pointer to the platform initialization function.                                                                                                   |
+  |  platform_release  |  Pointer to the platform release function.                                                                                                          |
+
+- *hal_init* function:\
+    Initializes the HAL structure based on compile time config. This
+    should be called before any other function in this API.
+
+  |  Parameter name  | Description|
+  |------------------|-----------------------------------------------------|
+  |  platform        | Pointer to a pre-allocated *hal_platform* struct.   |
+  |  data_acq        | Pointer to a pre-allocated data acquisition module  |
+  |  data_psn        | Pointer to a pre-allocated data presentation module |
+  |  timer           | Pointer to a pre-allocated timer module             |
+  |  return          | zero if successful, error code otherwise            |
+
+- *hal_platform_init* function:\
+  Initializes the HAL platform and all the modules on the platform the
+  application requires to run.
+
+  | Parameter name  | Description                                                         |
+  | ----------------| ------------------------------------------------------------------- |
+  | platform        | Pointer to a pre-allocated and initialized *hal_platform* struct.   |
+  | return          | zero if successful, error code otherwise.                           |
+
+- *hal_platform_release* function:\
+  Releases the HAL platform and any resources acquired.
+
+  | Parameter name  | Description                                                         |
+  | ----------------| ------------------------------------------------------------------- |
+  |  platform       | Pointer to a pre-allocated and initialized *hal_platform* struct.   |
+
+- *data_acq_module* structure:\
+  Structure to encompass the data acquisition module and its
+  methods.
+
+  | Attribute name | Description                                        |
+  |----------------|----------------------------------------------------|
+  | inited         | Initialization flag. It is set after the system_init() function is called. |
+  | system_name    | Channel name. It is set to "UART" for both MPS3 and Fast Model builds. |
+  | system_init    | Pointer to the data acquisition module initialization function. The pointer is set according to the platform selected during the build. This function is called by the platform initialization routines. |
+  | get_input      | Pointer to a function reading user input. The pointer is set according to the platform selected during the build. For MPS3 and Fast Model environments, the function reads data from the UART. |
+
+- *data_psn_module* structure:\
+  Structure to encompass the data presentation module and its methods.
+
+  | Attribute name     | Description                                    |
+  |--------------------|------------------------------------------------|
+  | inited             | Initialization flag. It is set after the system_init() function is called. |
+  | system_name        | System component name used to present data. It is set to "lcd" for the MPS3 build and to "log_psn" for the Fast Model build. In the case of Fast Model, all pixel drawing functions are replaced by console output of a data summary. |
+  | system_init        | Pointer to the data presentation module initialization function. The pointer is set according to the platform selected during the build. This function is called by the platform initialization routines. |
+  | present_data_image | Pointer to a function to draw an image. The pointer is set according to the platform selected during the build. For MPS3, the image will be drawn on the LCD; for Fast Model, an image summary will be printed to the UART (coordinates, channel info, downsample factor). |
+  | present_data_text  | Pointer to a function to print text. The pointer is set according to the platform selected during the build. For MPS3, the text will be drawn on the LCD; for Fast Model, the text will be printed to the UART. |
+  | present_box        | Pointer to a function to draw a rectangle. The pointer is set according to the platform selected during the build. For MPS3, the rectangle will be drawn on the LCD; for Fast Model, a summary will be printed to the UART. |
+  | clear              | Pointer to a function to clear the output. The pointer is set according to the platform selected during the build. For MPS3, the function will clear the LCD; for Fast Model, it will do nothing. |
+  | set_text_color     | Pointer to a function to set the text color for the next call of the present_data_text() function. The pointer is set according to the platform selected during the build. For MPS3, the function will set the color for the text printed on the LCD; for Fast Model, it will do nothing. |
+  | set_led            | Pointer to a function controlling an LED (led_num), switching it on or off.  |
+
+- *platform_timer* structure:\
+    Structure to hold a platform specific timer implementation.
+
+  | Attribute name     | Description                                    |
+  |--------------------|------------------------------------------------|
+  |  inited            |  Initialization flag. It is set after the timer is initialized by the *hal_platform_init* function. |
+  |  reset             |   Pointer to a function to reset a timer. |
+  |  get_time_counter  |   Pointer to a function to get current time counter. |
+  |  get_duration_ms   |   Pointer to a function to calculate duration between two time-counters in milliseconds. |
+  |  get_duration_us   |   Pointer to a function to calculate duration between two time-counters in microseconds. |
+  |  get_npu_cycle_diff |  Pointer to a function to calculate duration between two time-counters in Ethos-U55 cycles. Available only when project is configured with ETHOS_U55_ENABLED set. |
+
+Example of the API initialization in the main function:
+
+```c++
+#include "hal.h"
+
+int main()
+{
+    hal_platform platform;
+    data_acq_module dataAcq;
+    data_psn_module dataPsn;
+    platform_timer timer;
+
+    /* Initialise the HAL and platform */
+    hal_init(&platform, &dataAcq, &dataPsn, &timer);
+    hal_platform_init(&platform);
+
+    /* ... application logic ... */
+
+    hal_platform_release(&platform);
+
+    return 0;
+}
+```
+
+## Main loop function
+
+The code sample application's main function delegates the use-case
+logic execution to a main loop function that must be implemented for
+each custom ML scenario.
+
+The main loop function takes a reference to the initialized *hal_platform*
+structure as an argument.
+
+The main loop function has external linkage and the main executable for the
+use-case will reference the function defined in the use-case
+code.
+
+```c++
+void main_loop(hal_platform& platform)
+{
+    /* ... use-case logic ... */
+}
+```
+
+## Application context
+
+The application context can be used to hold state between main
+loop iterations. Include AppContext.hpp to use the ApplicationContext class.
+
+| Method name  | Description                                                     |
+|--------------|-----------------------------------------------------------------|
+|  Set         |  Saves given value as a named attribute in the context.         |
+|  Get         |  Gets the saved attribute from the context by the given name.   |
+|  Has         |  Checks if an attribute with a given name exists in the context. |
+
+For example:
+
+```c++
+#include "hal.h"
+#include "AppContext.hpp"
+
+void main_loop(hal_platform& platform) {
+
+    /* Instantiate application context */
+    arm::app::ApplicationContext caseContext;
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<uint32_t>("counter", 0);
+
+    /* Main loop */
+    while (true) {
+        /* Do something; pass the application context down the call stack. */
+    }
+}
+```
+
+## Profiler
+
+The Profiler is a helper class that assists in collecting timings and
+Ethos-U55 cycle counts for operations. It uses the platform timer to obtain
+system timing information.
+
+| Method name          | Description                                               |
+|----------------------|-----------------------------------------------------------|
+|  StartProfiling      | Starts profiling and records the starting timing data.    |
+|  StopProfiling       | Stops profiling and records the ending timing data.       |
+|  Reset               | Resets the profiler and clears all collected data.        |
+|  GetResultsAndReset  | Gets the results as string and resets the profiler.       |
+
+Usage example:
+
+```c++
+Profiler profiler{&platform, "Inference"};
+
+profiler.StartProfiling();
+// Code running inference to profile
+profiler.StopProfiling();
+
+info("%s\n", profiler.GetResultsAndReset().c_str());
+```
+
+## NN Model API
+
+Model (the neural network model) is an abstract class wrapping the
+underlying TensorFlow Lite Micro API. It provides methods to perform
+common operations such as TensorFlow Lite Micro framework
+initialization, inference execution, and access to input and output tensor
+objects.
+
+To use this abstraction, include the TensorFlowLiteMicro.hpp header.
+
+| Method name              | Description                                                                  |
+|--------------------------|------------------------------------------------------------------------------|
+|  GetInputTensor          |   Returns the pointer to the model's input tensor.                           |
+|  GetOutputTensor         |   Returns the pointer to the model's output tensor.                          |
+|  GetType                 |   Returns the model's data type.                                             |
+|  GetInputShape           |   Returns the pointer to the model's input shape.                            |
+|  GetOutputShape          |   Returns the pointer to the model's output shape.                           |
+|  LogTensorInfo           |   Logs the tensor information to stdout for the given tensor pointer: tensor name, tensor address, tensor type, tensor memory size and quantization params.  |
+|  LogInterpreterInfo      |   Logs the interpreter information to stdout.                                |
+|  Init                    |   Initializes the TensorFlow Lite Micro framework, allocates the required memory for the model. |
+|  IsInited                |  Checks if this model object has been initialized.                           |
+|  IsDataSigned            |  Checks if the model uses signed data type.                                  |
+|  RunInference            |  Runs the inference (invokes the interpreter).                               |
+|  GetOpResolver           |  Returns the reference to the TensorFlow Lite Micro operator resolver.       |
+|  EnlistOperations        |  Registers required operators with TensorFlow Lite Micro operator resolver.  |
+|  GetTensorArena          |  Returns the pointer to the memory region to be used for tensor allocations. |
+|  GetActivationBufferSize |  Returns the size of the tensor arena memory region.                         |
+
+> **Convention**:  Each ML use-case must extend this class and implement the protected virtual methods:
+>
+>```c++
+>virtual const tflite::MicroOpResolver& GetOpResolver() = 0;
+>virtual bool EnlistOperations() = 0;
+>virtual uint8_t* GetTensorArena() = 0;
+>virtual size_t GetActivationBufferSize() = 0;
+>```
+>
+>Network models have different sets of operators that must be registered with the
+tflite::MicroMutableOpResolver object in the EnlistOperations method.
+Network models could require different sizes of activation buffer that is returned as
+tensor arena memory for the TensorFlow Lite Micro framework by the GetTensorArena
+and GetActivationBufferSize methods.
+
+Please see the MobileNetModel.hpp and MobileNetModel.cc files from the image
+classification ML application use-case as an example of extending the model base
+class.
+
+## Adding custom ML use case
+
+This section describes how to implement an additional use-case and compile
+it into a binary executable to run with the Fast Model or the MPS3 FPGA board.
+It covers the common major steps: creating the application main loop,
+describing the NN model, and executing inference.
+
+In addition, a few useful examples are provided: reading user input,
+printing to the console, and drawing images on the MPS3 LCD.
+
+```tree
+use_case
+   └──hello_world
+      ├── include
+      └── src
+```
+
+Start by creating a sub-directory under the *use_case* directory with
+two further directories, *src* and *include*, as described in the
+[Software project description](#software-project-description) section and shown in the tree above.
+
+## Implementing main loop
+
+The use-case main loop is the place to put the use-case's main logic. Essentially,
+it is an infinite loop that reacts to user input, triggers use-case
+conditional logic based on that input and presents results back to the
+user. However, it could also be simple logic that runs a single inference
+and then exits.
+
+The main loop has knowledge about the platform and has access to the
+platform components through the hardware abstraction layer (referred to as HAL).
+
+Create a *MainLoop.cc* file in the *src* directory (the one created under
+[Adding custom ML use case](#adding-custom-ml-use-case)); the file name is not
+important. Define the *main_loop* function with the signature described in
+[Main loop function](#main-loop-function):
+
+```c++
+#include "hal.h"
+
+void main_loop(hal_platform& platform) {
+  printf("Hello world!");
+}
+```
+
+The above is already a working use-case. If you compile and run it (see
+[Building custom use case](#building-custom-use-case)), the application will start, print the
+message to the console and exit straight away.
+
+Now, you can start filling this function with logic.
+
+## Implementing custom NN model
+
+Before an inference can be run with a custom NN model, the TensorFlow Lite
+Micro framework must know about the operators/layers included in the
+model. The developer must register the operators using the *MicroMutableOpResolver*
+API.
+
+The Ethos-U55 code samples project has an abstraction around the TensorFlow
+Lite Micro API (see [NN model API](#nn-model-api)). Create *HelloWorldModel.hpp* in
+the use-case include sub-directory, extend the Model abstract class and
+declare the required methods.
+
+For example:
+
+```c++
+#include "Model.hpp"
+
+namespace arm {
+namespace app {
+
+class HelloWorldModel: public Model {
+  protected:
+    /** @brief   Gets the reference to op resolver interface class. */
+    const tflite::MicroOpResolver& GetOpResolver() override;
+
+    /** @brief   Adds operations to the op resolver instance. */
+    bool EnlistOperations() override;
+
+    const uint8_t* ModelPointer() override;
+
+    size_t ModelSize() override;
+
+  private:
+    /* Maximum number of individual operations that can be enlisted. */
+    static constexpr int _m_maxOpCnt = 5;
+
+    /* A mutable op resolver instance. */
+    tflite::MicroMutableOpResolver<_m_maxOpCnt> _m_opResolver;
+  };
+} /* namespace app */
+} /* namespace arm */
+```
+
+Create a `HelloWorldModel.cc` file in the `src` sub-directory and define the methods
+there. Include the `HelloWorldModel.hpp` created earlier. Note that `Model.hpp`,
+included by that header, provides access to TensorFlow Lite Micro's operation
+resolver API.
+
+Please see `use_case/img_class/src/MobileNetModel.cc` for
+code examples.\
+If you are using a TensorFlow Lite model compiled with Vela, it is important to add
+the custom Ethos-U55 operator to the operators list.
+
+The following example shows how to add the custom Ethos-U55 operator with
+the TensorFlow Lite Micro framework. The ARM_NPU define is used to exclude
+the code if the application was built without NPU support.
+
+```c++
+#include "HelloWorldModel.hpp"
+
+bool arm::app::HelloWorldModel::EnlistOperations() {
+
+  #if defined(ARM_NPU)
+    if (kTfLiteOk == this->_m_opResolver.AddEthosU()) {
+        info("Added %s support to op resolver\n",
+            tflite::GetString_ETHOSU());
+    } else {
+        printf_err("Failed to add Arm NPU support to op resolver.");
+        return false;
+    }
+  #endif /* ARM_NPU */
+
+    return true;
+}
+```
+
+To minimize the application's memory footprint, it is advised to register only
+the operators used by the NN model.
+
+Define the `ModelPointer` and `ModelSize` methods. These functions are wrappers around the
+functions generated in the C++ file containing the neural network model as an array.
+The logic to generate this C++ array from the .tflite file needs to be defined in
+the `usecase.cmake` file for this `HelloWorld` example.
+
+For more details on `usecase.cmake`, see [Building custom use case](#building-custom-use-case).
+For details on code generation flow in general, see [Automatic file generation](./building.md#Automatic-file-generation)
+
+The TensorFlow Lite model data is read during the Model::Init() method execution, see
+*application/tensorflow-lite-micro/Model.cc* for more details. Model invokes the
+`ModelPointer()` function, which calls the `GetModelPointer()` function to get the
+neural network model data memory address. The `GetModelPointer()` function
+is generated during the build and can be found in the
+file `build/generated/hello_world/src/<model_file_name>.cc`. The generated
+file is added to the compilation automatically.
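+
+A minimal sketch of these wrapper methods is shown below. It assumes the generated file exposes `GetModelPointer()`
+(described above) and a corresponding size accessor, named `GetModelLen()` here purely for illustration:
+
+```c++
+#include "HelloWorldModel.hpp"
+
+#include <cstddef>
+#include <cstdint>
+
+/* Declarations for the functions emitted into
+ * build/generated/hello_world/src/<model_file_name>.cc by the build.
+ * GetModelLen() is an assumed name for the size accessor. */
+extern const uint8_t* GetModelPointer();
+extern size_t GetModelLen();
+
+const uint8_t* arm::app::HelloWorldModel::ModelPointer()
+{
+    return GetModelPointer();
+}
+
+size_t arm::app::HelloWorldModel::ModelSize()
+{
+    return GetModelLen();
+}
+```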
+
+Use the `${use_case}_MODEL_TFLITE_PATH` build parameter to include a custom
+model in the generation/compilation process (see [Build options](./building.md/#build-options)).
+
+## Executing inference
+
+To run an inference successfully it is required to have:
+
+- a TensorFlow Lite model file
+- extended Model class
+- place to add the code to invoke inference
+- main loop function
+- some input data.
+
+For the hello_world example below, the input array is not populated.
+However, for real-world scenarios, this data should either be read from
+an on-board device or be prepared in the form of C++ sources before
+compilation and be baked into the application.
+
+For example, the image classification application has extra build steps
+to generate C++ sources from the provided images with
+*generate_images_code* CMake function.
+
+> **Note:**
+Check that the input data type of your NN model and the input array data type are the same.
+For example, the generated C++ sources for images store image data as a uint8 array. For models that were
+quantized to the int8 data type, it is important to convert the image data to int8 correctly before executing inference.
+Conversion from an asymmetric to a symmetric data type involves repositioning the zero point, i.e. subtracting an
+offset from the uint8 values. Please check the image classification application source for a code example
+(the ConvertImgToInt8 function).
+
+The following code adds inference invocation to the main loop function:
+
+```c++
+#include "hal.h"
+#include "HelloWorldModel.hpp"
+
+void main_loop(hal_platform& platform) {
+
+  /* Model wrapper object */
+  arm::app::HelloWorldModel model;
+
+  /* Load the model */
+  if (!model.Init()) {
+    printf_err("failed to initialise model\n");
+    return;
+  }
+
+  TfLiteTensor* outputTensor = model.GetOutputTensor();
+  TfLiteTensor* inputTensor  = model.GetInputTensor();
+
+  /* Dummy input data */
+  uint8_t inputData[1000];
+
+  memcpy(inputTensor->data.data, inputData, 1000);
+
+  /* Run inference */
+  model.RunInference();
+
+  const uint32_t tensorSz = outputTensor->bytes;
+  const uint8_t* outputData = tflite::GetTensorData<uint8_t>(outputTensor);
+}
+```
+
+The code snippet has several important blocks:
+
+- Creating HelloWorldModel object and initializing it.
+
+  ```c++
+  arm::app::HelloWorldModel model;
+
+  /* Load the model */
+  if (!model.Init()) {
+    printf_err(\"failed to initialise model\\n\");
+    return;
+  }
+  ```
+
+- Getting pointers to allocated input and output tensors.
+
+  ```c++
+  TfLiteTensor *outputTensor = model.GetOutputTensor();
+  TfLiteTensor *inputTensor = model.GetInputTensor();
+  ```
+
+- Copying input data to the input tensor. We assume the input tensor size
+  to be 1000 uint8 elements.
+
+  ```c++
+  memcpy(inputTensor->data.data, inputData, 1000);
+  ```
+
+- Running inference
+
+  ```c++
+  model.RunInference();
+  ```
+
+- Reading inference results: data and data size from the output
+  tensor. We assume that the output layer has a uint8 data type.
+
+  ```c++
+  const uint32_t tensorSz = outputTensor->bytes;
+
+  const uint8_t *outputData = tflite::GetTensorData<uint8_t>(outputTensor);
+  ```
+
+Adding profiling for Ethos-U55 is easy. Include `Profiler.hpp` header and
+invoke `StartProfiling` and `StopProfiling` around inference
+execution.
+
+```c++
+Profiler profiler{&platform, "Inference"};
+
+profiler.StartProfiling();
+model.RunInference();
+profiler.StopProfiling();
+std::string profileResults = profiler.GetResultsAndReset();
+
+info("%s\n", profileResults.c_str());
+```
+
+## Printing to console
+
+The provided examples already use some functions to print messages to the
+console. The full list of available functions is:
+
+- `printf`
+- `trace` - printf wrapper for tracing messages
+- `debug` - printf wrapper for debug messages
+- `info` - printf wrapper for informational messages
+- `warn` - printf wrapper for warning messages
+- `printf_err` - printf wrapper for error messages
+
+The `printf` wrappers can be switched off with the `LOG_LEVEL` define:
+
+trace (0) < debug (1) < info (2) < warn (3) < error (4).
+
+The default output level is info = level 2.
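+
+For example, a brief illustration of the wrappers is shown below; it assumes the logging helpers are made available by
+including hal.h, as in the earlier examples:
+
+```c++
+#include "hal.h"
+
+void log_examples()
+{
+    trace("Entering %s\n", __FUNCTION__);            /* level 0 */
+    debug("Buffer size: %u\n", 1024U);               /* level 1 */
+    info("Inference completed\n");                   /* level 2 */
+    warn("Input data was clipped\n");                /* level 3 */
+    printf_err("Failed to allocate tensor arena\n"); /* level 4 */
+}
+```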
+
+## Reading user input from console
+
+The platform data acquisition module has a get_input function to read keyboard
+input from the UART. It can be used as follows:
+
+```c++
+char ch_input[128];
+platform.data_acq->get_input(ch_input, sizeof(ch_input));
+```
+
+The function will block until the user provides an input.
+
+## Output to MPS3 LCD
+
+The platform data presentation module has functions to print text or an image to
+the board's LCD:
+
+- `present_data_text`
+- `present_data_image`
+
+The text presentation function takes the following parameters:
+
+- `const char* str`: string to print.
+- `const uint32_t str_sz`: string size.
+- `const uint32_t pos_x`: x coordinate of the first letter in pixels.
+- `const uint32_t pos_y`: y coordinate of the first letter in pixels.
+- `const uint32_t allow_multiple_lines`: signals whether the text is
+    allowed to span multiple lines on the screen, or should be truncated
+    to the current line.
+
+This function does not wrap text: if the given string cannot fit on the
+screen, it will go outside the screen boundary.
+
+Example that prints "Hello world" on the LCD:
+
+```c++
+std::string hello("Hello world");
+platform.data_psn->present_data_text(hello.c_str(), hello.size(), 10, 35, 0);
+```
+
+The image presentation function takes the following parameters:
+
+- `uint8_t* data`: image data pointer.
+- `const uint32_t width`: image width.
+- `const uint32_t height`: image height.
+- `const uint32_t channels`: number of channels. Only 1 and 3 channels are supported now.
+- `const uint32_t pos_x`: x coordinate of the first pixel.
+- `const uint32_t pos_y`: y coordinate of the first pixel.
+- `const uint32_t downsample_factor`: the factor by which the image is to be downsampled.
+
+For example, the following code snippet visualizes the input tensor data
+for MobileNet v2 224 (downsampling it by a factor of 2):
+
+```c++
+platform.data_psn->present_data_image((uint8_t *) inputTensor->data.data, 224, 224, 3, 10, 35, 2);
+```
+
+Please see the [HAL API](#hal-api) section for other data presentation
+functions.
+
+## Building custom use case
+
+There is one last thing to do before building and running a use-case
+application: create a `usecase.cmake` file in the root of your use-case;
+the name of the file is not important.
+
+> **Convention:**  The build system searches for CMake file in each use-case directory and includes it into the build
+> flow. This file could be used to specify additional application specific build options, add custom build steps or
+> override standard compilation and linking flags.
+> Use the `USER_OPTION` function to add additional build options. Prefix variable names with `${use_case}` (the
+> use-case name) to avoid name collisions with other CMake variables.
+> Some useful variable names visible in the use-case CMake file:
+>
+> - `DEFAULT_MODEL_PATH` – default model path to use if the use-case specific `${use_case}_MODEL_TFLITE_PATH` is not set
+>   in the build arguments.
+> - `TARGET_NAME` – name of the executable.
+> - `use_case` – name of the current use-case.
+> - `UC_SRC` – list of use-case sources.
+> - `UC_INCLUDE` – path to the use-case headers.
+> - `ETHOS_U55_ENABLED` – flag indicating if the current build supports Ethos-U55.
+> - `TARGET_PLATFORM` – Target platform being built for.
+> - `TARGET_SUBSYSTEM` – If target platform supports multiple subsystems, this is the name of the subsystem.
+> - All standard build options.
+>   - `CMAKE_CXX_FLAGS` and `CMAKE_C_FLAGS` – compilation flags.
+>   - `CMAKE_EXE_LINKER_FLAGS` – linker flags.
+
+For the hello_world use-case, it is enough to create a
+`helloworld.cmake` file and set DEFAULT_MODEL_PATH:
+
+```cmake
+if (ETHOS_U55_ENABLED EQUAL 1)
+  set(DEFAULT_MODEL_PATH  ${DEFAULT_MODEL_DIR}/helloworldmodel_uint8_vela.tflite)
+else()
+  set(DEFAULT_MODEL_PATH  ${DEFAULT_MODEL_DIR}/helloworldmodel_uint8.tflite)
+endif()
+```
+
+This can then be used in a subsequent section, for example:
+
+```cmake
+USER_OPTION(${use_case}_MODEL_TFLITE_PATH "Neural network model in tflite format."
+    ${DEFAULT_MODEL_PATH}
+    FILEPATH
+    )
+
+# Generate model file
+generate_tflite_code(
+    MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+    DESTINATION ${SRC_GEN_DIR}
+    )
+```
+
+This ensures that the model path pointed to by `${use_case}_MODEL_TFLITE_PATH` is converted to a C++ array and is picked
+up by the build system. More information on auto-generation is available under the section
+[Automatic file generation](./building.md#Automatic-file-generation).
+
+To build your application, follow the general instructions from
+[Add Custom inputs](#add-custom-inputs) and specify the name of the use-case in the
+build command:
+
+```commandline
+cmake \
+  -DTARGET_PLATFORM=mps3 \
+  -DTARGET_SUBSYSTEM=sse-300 \
+  -DUSE_CASE_BUILD=hello_world \
+  -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+As a result, `ethos-u-hello_world.axf` should be created. The MPS3 build
+will also produce a `sectors/hello_world` directory with binaries and an
+`images-hello_world.txt` file to be copied to the board's micro SD card.
+
+Next section of the documentation: [Testing and benchmarking](../documentation.md#Testing-and-benchmarking).
diff --git a/docs/sections/deployment.md b/docs/sections/deployment.md
new file mode 100644
index 0000000..354d30b
--- /dev/null
+++ b/docs/sections/deployment.md
@@ -0,0 +1,281 @@
+# Deployment
+
+- [Fixed Virtual Platform](#fixed-virtual-platform)
+  - [Setting up the MPS3 Arm Corstone-300 FVP](#setting-up-the-mps3-arm-corstone-300-fvp)
+  - [Deploying on an FVP emulating MPS3](#deploying-on-an-fvp-emulating-mps3)
+- [MPS3 board](#mps3-board)
+  - [Deployment on MPS3 board](#deployment-on-mps3-board)
+
+The sample application for Arm® Ethos™-U55 can be deployed on two
+target platforms, both of which implement the Arm® Corstone™-300 design (see
+<https://www.arm.com/products/iot/soc/corstone-300>):
+
+- A physical Arm MPS3 FPGA prototyping board
+
+- An MPS3 FVP
+
+## Fixed Virtual Platform
+
+The FVP is available publicly from [Arm Ecosystem FVP downloads
+](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+Download the correct archive from the list under `Arm Corstone-300`. We need the one which:
+
+- Emulates MPS3 board (not for MPS2 FPGA board)
+- Contains support for Arm® Ethos™-U55
+
+> **Note:** Currently, the FVP only has a Linux OS version. Also, there are no FVPs available for `SSE-200`
+> which satisfy the above conditions.
+
+For the FVP, the elf or the axf file can be run using the Fast Model
+executable, as outlined under [Starting Fast Model simulation](./setup.md/#starting-fast-model-simulation),
+except that the binary pointed at here
+is the one just built using the steps in the previous section.
+
+### Setting up the MPS3 Arm Corstone-300 FVP
+
+For Ethos-U55 sample application, please download the MPS3 version of the
+Arm® Corstone™-300 model that contains Ethos-U55 and Arm® Cortex®-M55. The model is
+currently only supported on Linux based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+    `./FVP_Corstone_SSE-300_Ethos-U55.sh`
+
+- Follow the instructions to install the FVP to your desired location
+
+### Deploying on an FVP emulating MPS3
+
+This section assumes that the FVP has been installed (see [Setting up the MPS3 Arm Corstone-300 FVP](#Setting-up-the-MPS3-Arm-Corstone-300-FVP)) to the user's home directory `~/FVP_Corstone_SSE-300_Ethos-U55`.
+
+The installation will typically place the executable under the `~/FVP_Corstone_SSE-300_Ethos-U55/models/<OS>_<compiler-version>/`
+directory. For the example below, we assume it to be `~/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4`.
+
+To run a use case on the FVP, from the [Build directory](../sections/building.md#Create-a-build-directory):
+
+```commandline
+~/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 -a ./bin/ethos-u-<use_case>.axf
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+
+    Ethos-U rev 0 --- Oct 13 2020 11:27:45
+    (C) COPYRIGHT 2019-2020 Arm Limited
+    ALL RIGHTS RESERVED
+```
+
+This will also launch a telnet window with the sample application's standard output and error log entries containing
+information about the pre-built application version, TensorFlow Lite Micro library version used, data type as well as
+the input and output tensor sizes of the model compiled into the executable binary.
+
+After the application has started, it outputs a menu and waits for user input from the telnet terminal.
+
+For example, the image classification use case can be started by:
+
+```commandline
+~/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 -a ./bin/ethos-u-img_class.axf
+```
+
+The FVP supports many command line parameters:
+
+- passed by using `-C <param>=<value>`. The most important ones are:
+  - `ethosu.num_macs`: Sets the Ethos-U55 configuration for the model. Valid parameters are `32`, `64`, `256`,
+    and the default one, `128`. The number signifies the number of 8x8 MACs performed per cycle by the hardware.
+  - `cpu0.CFGITCMSZ`: ITCM size for the Cortex-M CPU. The size of the ITCM is pow(2, CFGITCMSZ - 1) KB
+    (for example, a value of 15 gives pow(2, 14) = 16384 KB).
+  - `cpu0.CFGDTCMSZ`: DTCM size for the Cortex-M CPU. The size of the DTCM is pow(2, CFGDTCMSZ - 1) KB.
+  - `mps3_board.telnetterminal0.start_telnet`: Starts the telnet session if nothing is connected.
+  - `mps3_board.uart0.out_file`: Sets the output file to hold data written by the UART
+    (use '-' to send all output to stdout, empty by default).
+  - `mps3_board.uart0.shutdown_on_eot`: Shuts down the simulation when an EOT (ASCII 4) character is transmitted.
+  - `mps3_board.visualisation.disable-visualisation`: Enables or disables visualisation (disabled by default).
+
+  To start the model in `128` mode for Ethos-U55:
+
+    ```commandline
+    ~/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 -a ./bin/ethos-u-img_class.axf -C ethosu.num_macs=128
+    ```
+
+- `-l`: shows the full list of supported parameters
+
+    ```commandline
+    ~/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 -l
+    ```
+
+- `--stat`: prints some run statistics on simulation exit
+
+    ```commandline
+    ~/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 --stat
+    ```
+
+- `--timelimit`: sets the number of wall clock seconds for the simulator to run, excluding startup and shutdown.
+
+## MPS3 board
+
+> **Note:**  Before proceeding, make sure you have the MPS3 board powered on,
+and a USB A to B cable connected between your machine and the MPS3.
+The connector on the MPS3 is marked as "Debug USB".
+
+![MPS3](../media/mps3.png)
+
+1. MPS3 board top view.
+
+Once the board has booted, the micro SD card will enumerate as a mass
+storage device. On most systems this will be automatically mounted, but
+you might need to mount it manually.
+
+Also, there should be four serial-over-USB ports available for use via
+this connection. On Linux based machines, these would typically be
+*/dev/ttyUSB\<n\>* to */dev/ttyUSB\<n+3\>*.
+
+The default configuration for all of them is 115200, 8/N/1 (115200 baud,
+8 bits, no parity and 1 stop bit) with no flow control.
+
+> **Note:** For Windows machines, additional FTDI drivers might need to be installed
+for these serial ports to be available.
+For more information on getting started with an MPS3 board, please refer to
+<https://developer.arm.com/-/media/Arm%20Developer%20Community/PDF/MPS3GettingStarted.pdf>
+
+### Deployment on MPS3 board
+
+> **Note:** These instructions are valid only if the evaluation is being
+ done using the MPS3 FPGA platform with either `SSE-200` or `SSE-300`.
+
+To run the application on the MPS3 platform, it is first necessary to make sure
+that the platform has been set up using the correct configuration.
+For details on platform set-up, please see the relevant documentation. For `Arm Corstone-300`, this is available
+[here](https://developer.arm.com/-/media/Arm%20Developer%20Community/PDF/DAI0547B_SSE300_PLUS_U55_FPGA_for_mps3.pdf?revision=d088d931-03c7-40e4-9045-31ed8c54a26f&la=en&hash=F0C7837C8ACEBC3A0CF02D871B3A6FF93E09C6B8).
+
+For the MPS3 board, instead of loading the axf file directly, the executable blobs
+generated under the *sectors/<use_case>* subdirectory need to be
+copied over to the MPS3 board's micro SD card. Also, every use case build
+generates a corresponding images.txt file which is used by the MPS3 to
+determine which memory regions the blobs are to be loaded into.
+
+Once the USB A <--> B cable between the MPS3 and the development machine
+is connected and the MPS3 board powered on, the board should enumerate
+as a mass storage device over this USB connection.
+There might also be two devices, depending on the version of the board
+you are using. The device named `V2M-MPS3` or `V2MMPS3` is the SD card.
+
+If the axf/elf file is within 1MiB, it can be flashed into the FPGA
+memory directly without having to break it down into separate load
+region specific blobs. However, with neural network models exceeding
+this size, it becomes necessary to follow this approach.
+
+1. For example, the image classification use case will produce:
+
+    ```tree
+    ./bin/sectors/
+        └── img_class
+            ├── dram.bin
+            └── itcm.bin
+    ```
+
+    For example, if the micro SD card is mounted at
+    /media/user/V2M-MPS3/:
+
+    ```commandline
+    cp -av ./bin/sectors/img_class/* /media/user/V2M-MPS3/SOFTWARE/
+    ```
+
+2. The generated `images-<use_case>.txt` file needs to be copied
+over to the MPS3. The exact location for the destination will depend
+on the MPS3 board's version and the application note for the bit
+file in use.
+For example, for MPS3 board hardware revision C, using an
+application note directory named "ETHOSU", to replace the images.txt
+file:
+
+    ```commandline
+    cp ./bin/images-img_class.txt /media/user/V2M-MPS3/MB/HBI0309C/ETHOSU/images.txt
+    ```
+
+3. Open the first serial port available from the MPS3, for example,
+"/dev/ttyUSB0". This can typically be done using the minicom, screen or
+PuTTY applications. Make sure the flow control setting is switched
+off.
+
+    ```commandline
+    minicom -D /dev/ttyUSB0
+    ```
+
+    ```log
+    Welcome to minicom 2.7.1
+    OPTIONS: I18n
+    Compiled on Aug 13 2017, 15:25:34.
+    Port /dev/ttyUSB0, 16:05:34
+    Press CTRL-A Z for help on special keys
+    Cmd>
+    ```
+
+4. In another terminal, open the second serial port, for example,
+    "/dev/ttyUSB1":
+
+    ```commandline
+    minicom -D /dev/ttyUSB1
+    ```
+
+5. On the first serial port, issue a "reboot" command and press the
+    return key
+
+    ```commandline
+    $ Cmd> reboot
+    ```
+
+    ```log
+    Rebooting...Disabling debug USB..Board rebooting...
+
+    ARM V2M-MPS3 Firmware v1.3.2
+    Build Date: Apr 20 2018
+
+    Powering up system...
+    Switching on main power...
+    Configuring motherboard (rev C, var A)...
+    ```
+
+    This will go on to reboot the board and prime the application to run by
+    flashing the binaries into their respective FPGA memory locations. For example:
+
+    ```log
+    Reading images file \MB\HBI0309C\ETHOSU\images.txt
+    Writing File \SOFTWARE\itcm.bin to Address 0x00000000
+
+    ............
+
+    File \SOFTWARE\itcm.bin written to memory address 0x00000000
+    Image loaded from \SOFTWARE\itcm.bin
+    Writing File \SOFTWARE\dram.bin to Address 0x08000000
+
+    ..........................................................................
+
+
+    File \SOFTWARE\dram.bin written to memory address 0x08000000
+    Image loaded from \SOFTWARE\dram.bin
+    ```
+
+6. When the reboot from the previous step is complete, issue a reset
+    command on the command prompt.
+
+    ```commandline
+    $ Cmd> reset
+    ```
+
+    This will trigger the application to start, and the output should be visible on the second serial connection.
+
+7. On the second serial port, output similar to the following should be visible:
+
+    ```log
+    [INFO] Setting up system tick IRQ (for NPU)
+    [INFO] V2M-MPS3 revision C
+    [INFO] Application Note AN540, Revision B
+    [INFO] FPGA build 1
+    [INFO] Core clock has been set to: 32000000 Hz
+    [INFO] CPU ID: 0x410fd220
+    [INFO] CPU: Cortex-M55 r0p0
+    ...
+    ```
+
+
+Next section of the main documentation, [Running code samples applications](../documentation.md#Running-code-samples-applications).
diff --git a/docs/sections/run.md b/docs/sections/run.md
new file mode 100644
index 0000000..90ee7c8
--- /dev/null
+++ b/docs/sections/run.md
@@ -0,0 +1,42 @@
+
+# Running Ethos-U55 Code Samples
+
+- [Starting Fast Model simulation](#starting-fast-model-simulation)
+
+This section covers the process for getting started with pre-built binaries for the Code Samples.
+
+## Starting Fast Model simulation
+
+Once the application binaries have been built, and assuming the install location of the FVP
+was set to ~/FVP_install_location, the simulation can be started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 -a ./bin/mps3-sse-300/ethos-u-<use_case>.axf
+```
+
+This will start the Fast Model simulation for the chosen use-case.
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's
+standard output and error log entries containing information about the
+pre-built application version, TensorFlow Lite Micro library version
+used, data type as well as the input and output tensor sizes of the
+model compiled into the executable binary.
+
+![FVP](../media/fvp.png)
+
+![FVP Terminal](../media/fvpterminal.png)
+
+> **Note:**
+For details on the specific use-case follow the instructions in the corresponding documentation.
+
+Next section of the documentation: [Implementing custom ML application](../documentation.md#Implementing-custom-ML-application).
diff --git a/docs/sections/testing_benchmarking.md b/docs/sections/testing_benchmarking.md
new file mode 100644
index 0000000..43bb7f4
--- /dev/null
+++ b/docs/sections/testing_benchmarking.md
@@ -0,0 +1,87 @@
+# Testing and benchmarking
+
+- [Testing](#testing)
+- [Benchmarking](#benchmarking)
+
+## Testing
+
+The `tests` folder has the following structure:
+
+```tree
+.
+├── common
+│   └── ...
+├── use_case
+│   ├── <usecase1>
+│   │   └── ...
+│   ├── <usecase2>
+│   │   └── ...
+└── utils
+    └── ...
+```
+
+Where:
+
+- `common`: contains tests for generic and common application functions.
+- `use_case`: contains all the use case specific tests in the respective folders.
+- `utils`: contains utilities sources used only within the tests.
+
+When [configuring](./building.md#configuring-the-build-native-unit-test) and
+[building](./building.md#Building-the-configured-project) for the `native` target platform, the results of the build will
+be placed under the `build/bin/` folder, for example:
+
+```tree
+.
+├── dev_ethosu_eval-<usecase1>-tests
+├── dev_ethosu_eval-<usecase2>-tests
+├── ethos-u-<usecase1>
+└── ethos-u-<usecase2>
+```
+
+To execute unit-tests for a specific use-case in addition to the common tests:
+
+```commandline
+dev_ethosu_eval-<use_case>-tests
+```
+
+```log
+[INFO] native platform initialised
+[INFO] ARM Ethos-U55 Evaluation application for MPS3 FPGA Prototyping Board and FastModel
+
+...
+===============================================================================
+   All tests passed (37 assertions in 7 test cases)
+```
+
+The test output could contain `[ERROR]` messages; that is alright - they come from negative scenario tests.
+
+## Benchmarking
+
+Profiling is enabled by default when configuring the project. This will enable displaying:
+
+- the active and idle NPU cycle counts when Arm® Ethos™-U55 is enabled (see `-DETHOS_U55_ENABLED` in
+  [Build options](./building.md#build-options)).
+- CPU cycle counts and/or time elapsed in milliseconds for inferences performed if CPU profiling is enabled
+  (see `-DCPU_PROFILE_ENABLED` in [Build options](./building.md#build-options)). This should be done only
+  when running on a physical FPGA board as the FVP does not contain a cycle-approximate or cycle-accurate Cortex-M model.
+
+For example:
+
+- On the FVP:
+
+```log
+    Active NPU cycles: 5475412
+    Idle NPU cycles:   702
+```
+
+- For MPS3 platform, the time duration in milliseconds is also reported when `-DCPU_PROFILE_ENABLED=1` is added to
+  CMake configuration command:
+
+```log
+    Active NPU cycles: 5629033
+    Idle NPU cycles:   1005276
+    Active CPU cycles: 993553 (approx)
+    Time in ms:        210
+```
+
+Next section of the main documentation: [Troubleshooting](../documentation.md#Troubleshooting).
diff --git a/docs/sections/troubleshooting.md b/docs/sections/troubleshooting.md
new file mode 100644
index 0000000..40b975a
--- /dev/null
+++ b/docs/sections/troubleshooting.md
@@ -0,0 +1,27 @@
+# Troubleshooting
+
+- [Inference results are incorrect for my custom files](#inference-results-are-incorrect-for-my-custom-files)
+- [The application does not work with my custom model](#the-application-does-not-work-with-my-custom-model)
+
+## Inference results are incorrect for my custom files
+
+Ensure that the files you are using match the requirements of the model
+you are using and that the CMake parameters are set accordingly. More
+information on these CMake parameters is detailed in their separate
+sections. Note that preprocessing of the files could also affect the
+inference result, such as the rescaling and padding operations done for
+image classification.
+
+## The application does not work with my custom model
+
+Ensure that your model is in a fully quantized `.tflite` file format,
+either uint8 or int8, and has successfully been run through the Vela
+compiler.
+
+Check that the CMake parameters match your new model's input requirements.
+
+> **Note:** The Vela tool is not available within this software project.
+It is a Python tool available from <https://pypi.org/project/ethos-u-vela/>.
+The source code is hosted on <https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/>.
+
+Next section of the documentation: [Contribution guidelines](../documentation.md#Contribution-guidelines).
diff --git a/docs/use_cases/ad.md b/docs/use_cases/ad.md
new file mode 100644
index 0000000..ca95af8
--- /dev/null
+++ b/docs/use_cases/ad.md
@@ -0,0 +1,523 @@
+# Anomaly Detection Code Sample
+
+  - [Introduction](#introduction)
+    - [Prerequisites](#prerequisites)
+  - [Building the code sample application from sources](#building-the-code-sample-application-from-sources)
+    - [Build options](#build-options)
+    - [Build process](#build-process)
+    - [Add custom input](#add-custom-input)
+    - [Add custom model](#add-custom-model)
+  - [Setting-up and running Ethos-U55 Code Sample](#setting-up-and-running-ethos-u55-code-sample)
+    - [Setting up the Ethos-U55 Fast Model](#setting-up-the-ethos-u55-fast-model)
+    - [Starting Fast Model simulation](#starting-fast-model-simulation)
+    - [Running Anomaly Detection](#running-anomaly-detection)
+  - [Anomaly Detection processing information](#anomaly-detection-processing-information)
+    - [Preprocessing and feature extraction](#preprocessing-and-feature-extraction)
+    - [Postprocessing](#postprocessing)
+
+## Introduction
+
+This document describes the process of setting up and running the Arm® Ethos™-U55 Anomaly Detection example.
+
+The use-case code can be found in the [source/use_case/ad](../../source/use_case/ad) directory.
+
+### Preprocessing and feature extraction
+
+The Anomaly Detection model that is used with the Code Samples expects audio data to be preprocessed
+in a specific way before performing an inference. This section aims to provide an overview of the feature extraction
+process used.
+
+First the audio data is normalized to the range (-1, 1).
+
+Next, a window of 1024 audio samples is taken from the start of the audio clip. From these 1024 samples we calculate 64
+Log Mel Energies that form part of a Log Mel Spectrogram.
+
+The window is shifted by 512 audio samples and another 64 Log Mel Energies are calculated. This is repeated until we
+have 64 sets of Log Mel Energies.
+
+This 64x64 matrix of values is resized by a factor of 2 resulting in a 32x32 matrix of values.
+
+The average of the training dataset is subtracted from this 32x32 matrix and an inference can then be performed.
+
+We start this process again but shifting the start by 20\*512=10240 audio samples. This keeps repeating until enough
+inferences have been performed to cover the whole audio clip.
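+
+The following self-contained sketch illustrates only the windowing arithmetic described above (window length, hop and
+inference stride are taken from the text; the clip length is an assumed example of a 10 second clip at 16 kHz). The
+actual Log Mel computation is performed by the application's pre-processing code:
+
+```c++
+#include <cinttypes>
+#include <cstdint>
+#include <cstdio>
+
+int main()
+{
+    const uint32_t frameLen  = 1024;      /* Samples per feature frame. */
+    const uint32_t frameHop  = 512;       /* Hop between feature frames. */
+    const uint32_t numFrames = 64;        /* Feature frames per inference (64x64 before resizing). */
+    const uint32_t infStride = 20 * 512;  /* Shift of the start position between inferences. */
+    const uint32_t clipLen   = 160000;    /* Assumed example: 10 s clip at 16 kHz. */
+
+    /* Samples consumed by one full 64-frame spectrogram. */
+    const uint32_t samplesPerInference = (numFrames - 1) * frameHop + frameLen;
+
+    for (uint32_t start = 0; start + samplesPerInference <= clipLen; start += infStride) {
+        printf("Inference over samples [%" PRIu32 ", %" PRIu32 ")\n",
+               start, start + samplesPerInference);
+    }
+    return 0;
+}
+```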
+
+### Postprocessing
+
+Softmax is applied to the result of each inference. Based on the machine ID of the WAV clip being processed, we look at a
+specific index in each output vector. An average of the negative value at this index across all the inferences performed
+for the audio clip is taken. If this average value is greater than a chosen threshold score, then the machine in the
+clip is not behaving anomalously. If the score is lower than the threshold then the machine in the clip is behaving
+anomalously.
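+
+As an illustration only, the scoring logic described above could be sketched as follows. The function name, output
+layout and types are assumptions; see the use-case sources for the actual implementation:
+
+```c++
+#include <cstddef>
+#include <vector>
+
+/* Sketch: average the negated softmax output at the machine-ID specific index
+ * across all inferences and compare against the chosen threshold. */
+bool IsMachineBehavingNormally(const std::vector<std::vector<float>>& softmaxOutputs,
+                               size_t machineIdIdx,
+                               float thresholdScore)
+{
+    float sum = 0.0f;
+    for (const auto& output : softmaxOutputs) {
+        sum += -output[machineIdIdx];  /* Negative value at the machine ID index. */
+    }
+    const float avg = sum / static_cast<float>(softmaxOutputs.size());
+
+    /* Greater than the threshold => not behaving anomalously (as described above). */
+    return avg > thresholdScore;
+}
+```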
+
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the code sample application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the Anomaly Detection use case adds the following:
+
+- `ad_MODEL_TFLITE_PATH` - Path to the NN model file in TFLite format. The model will be processed and included into
+    the application axf file. The default value points to one of the delivered set of models. Note that the parameters
+    `ad_LABELS_TXT_FILE`, `TARGET_PLATFORM` and `ETHOS_U55_ENABLED` should be aligned with the chosen model, i.e.:
+  - if `ETHOS_U55_ENABLED` is set to `On` or `1`, the NN model is assumed to be optimized. The model will naturally fall
+    back to the Arm® Cortex®-M CPU if an unoptimized model is supplied.
+  - if `ETHOS_U55_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized
+    model in this case will result in a runtime error.
+
+- `ad_FILE_PATH`: Path to the directory containing audio files, or a path to single WAV file, to be used in the
+    application. The default value points to the resources/ad/samples folder containing the delivered set of audio clips.
+
+- `ad_AUDIO_RATE`: Input data sampling rate. Each audio file from ad_FILE_PATH is preprocessed during the build to
+    match the NN model input requirements. Default value is 16000.
+
+- `ad_AUDIO_MONO`: If set to ON the audio data will be converted to mono. Default is ON.
+
+- `ad_AUDIO_OFFSET`: Start loading audio data from this offset (in seconds). Default value is 0.
+
+- `ad_AUDIO_DURATION`: Length of the audio data to be used in the application in seconds. Default is 0 meaning the
+    whole audio file will be taken.
+
+- `ad_AUDIO_MIN_SAMPLES`: Minimum number of samples required by the network model. If the audio clip is shorter than
+    this number, it is padded with zeros. Default value is 16000.
+
+- `ad_MODEL_SCORE_THRESHOLD`: Threshold value to be applied to the average softmax score over the clip; if it is larger
+    than this score, we have an anomaly.
+
+- `ad_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By default, it is set to
+    2MiB and should be enough for most models.
+
+In order to build **only** the Anomaly Detection example application, add `-DUSE_CASE_BUILD=ad` to the `cmake` command line specified in [Building](../documentation.md#Building).
+
+### Build process
+
+> **Note:** This section describes the process for configuring the build for `MPS3: SSE-300`. For a different target
+>platform, see [Building](../documentation.md#Building).
+
+Create a build directory and navigate into it:
+
+```commandline
+mkdir build_ad && cd build_ad
+```
+
+On Linux, execute the following command to build **only** the Anomaly Detection application to run on the Ethos-U55 Fast Model when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+Toolchain option `CMAKE_TOOLCHAIN_FILE` points to the toolchain specific file to set the compiler and platform specific
+parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Modeldebugger for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+> **Note:** If building for different Ethos-U55 configurations, see
+[Configuring build for different Arm Ethos-U55 configurations](../sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations):
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver is not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` can be used to configure the location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+Also, the `CMSIS_SRC_PATH` parameter can be used to override the CMSIS sources used by TensorFlow by
+default. For example, to use the CMSIS sources fetched by the ethos-u helper script, we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=../ethos-u/core_software/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=../ethos-u/core_software/core_driver \
+    -DCMSIS_SRC_PATH=../ethos-u/core_software/cmsis \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run the CMake command.
+
+If the CMake command succeeded, build the application as follows:
+
+```commandline
+make -j4
+```
+
+For Windows, use `mingw32-make`.
+
+Add VERBOSE=1 to see compilation and link details.
+
+Results of the build will be placed under `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-ad.axf
+ ├── ethos-u-ad.htm
+ ├── ethos-u-ad.map
+ ├── images-ad.txt
+ └── sectors
+      └── ad
+          ├── dram.bin
+          └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-ad.axf`: The built application binary for the Anomaly Detection use case.
+
+- `ethos-u-ad.map`: Information from building the application (e.g. libraries used, what was optimized, location of
+    objects)
+
+- `ethos-u-ad.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `images-ad.txt`: Tells the FPGA which memory regions to use for loading the binaries in the sectors/\*\* folder.
+
+### Add custom input
+
+The application performs anomaly detection on audio data found in the folder, or an individual file, set by the CMake parameter
+``ad_FILE_PATH``.
+
+To run the application with your own audio clips first create a folder to hold them and then copy the custom clips into
+this folder:
+
+```commandline
+mkdir /tmp/custom_files
+
+cp custom_id_00.wav /tmp/custom_files/
+```
+
+> **Note:** The data used for this example comes from
+[https://zenodo.org/record/3384388\#.X6GILFNKiqA](https://zenodo.org/record/3384388\#.X6GILFNKiqA)
+and the model included in this example is trained on the ‘Slider’ part of the dataset.
+The machine ID (00, 02, 04, 06) the clip comes from must be in the file name for the application to work.
+The file name should have a pattern that matches
+e.g. `<any>_<text>_00_<here>.wav` if the audio was from machine ID 00
+or `<any>_<text>_02_<here>.wav` if it was from machine ID 02 etc.
+>
+> **Note:** Clean the build directory before re-running the CMake command.
+
+Next, set `ad_FILE_PATH` to the location of this folder when building:
+
+```commandline
+cmake \
+    -Dad_FILE_PATH=/tmp/custom_files/ \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+The audio files found in the `ad_FILE_PATH` folder will be picked up and automatically converted to C++ files during the CMake
+configuration stage and then compiled into the application during the build phase for performing inference with.
+
+The log from the configuration stage should tell you what audio directory path has been used:
+
+```log
+-- User option ad_FILE_PATH is set to /tmp/custom_files
+```
+
+After compiling, your custom inputs will have now replaced the default ones in the application.
+
+### Add custom model
+
+The application performs inference using the model pointed to by the CMake parameter `ad_MODEL_TFLITE_PATH`.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom model has been run through the Vela compiler
+>successfully before continuing. See [Optimize model with Vela compiler](../sections/building.md#Optimize-custom-model-with-Vela-compiler).
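+
+As a minimal sketch (assuming the `ethos-u-vela` Python package is installed and that the accelerator configuration below
+matches your target), a model can be compiled with Vela before being passed to the build:
+
+```commandline
+# Sketch only: check `vela --help` for the options supported by your Vela version
+vela custom_ad_model.tflite \
+    --accelerator-config=ethos-u55-128 \
+    --output-dir=./vela_output
+```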
+
+An example of configuring the build to use the custom model:
+
+```commandline
+cmake \
+    -Dad_MODEL_TFLITE_PATH=<path/to/custom_ad_model_after_vela.tflite> \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The `.tflite` model file pointed to by `ad_MODEL_TFLITE_PATH` will be converted
+to C++ files during the CMake configuration
+stage and then compiled into the application for performing inference with.
+
+The log from the configuration stage should tell you what model path has been used:
+
+```log
+-- User option TARGET_PLATFORM is set to mps3
+-- User option ad_MODEL_TFLITE_PATH is set to <path/to/custom_ad_model_after_vela.tflite>
+...
+-- Using <path/to/custom_ad_model_after_vela.tflite>
+++ Converting custom_ad_model_after_vela.tflite to custom_ad_model_after_vela.tflite.cc
+...
+```
+
+After compiling, your custom model will have now replaced the default one in the application.
+
+> **Note:** To run the model successfully, the NPU needs to be enabled and `TARGET_PLATFORM` must be set to mps3 with
+> `TARGET_SUBSYSTEM` set to SSE-200 or SSE-300.
+
+## Setting-up and running Ethos-U55 Code Sample
+
+### Setting up the Ethos-U55 Fast Model
+
+The FVP is available publicly from [Arm Ecosystem FVP downloads
+](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For Ethos-U55 evaluation, please download the MPS3 version of the Arm® Corstone™-300 model that contains Ethos-U55 and
+Cortex-M55. The model is currently only supported on Linux based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+./FVP_Corstone_SSE-300_Ethos-U55.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+> **Note:** The anomaly detection example does not come pre-built. You will first need to follow the instructions in
+>section 3 for building the application from source.
+
+After building, and assuming the install location of the FVP was set to ~/FVP_install_location, the simulation can be
+started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 ./bin/ethos-u-ad.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error log entries containing
+information about the pre-built application version, TensorFlow Lite Micro library version used, data type as well as
+the input and output tensor sizes of the model compiled into the executable binary.
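+
+If a telnet terminal does not open automatically, you can connect to the first UART manually from another terminal on the
+same machine, for example:
+
+```commandline
+# Port 5000 corresponds to telnetterminal0 in the FVP log above
+telnet localhost 5000
+```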
+
+After the application has started, if `ad_FILE_PATH` points to a single file (or a folder containing a single input file) the
+inference starts immediately. If there are multiple inputs, the application outputs a menu and waits for user input from the
+telnet terminal:
+
+```log
+User input required
+Enter option number from:
+
+1. Classify next audio clip
+2. Classify audio clip at chosen index
+3. Run classification on all audio clips
+4. Show NN model info
+5. List audio clips
+
+Choice:
+
+```
+
+1. “Classify next audio clip” menu option will run a single inference on the next audio clip in line.
+
+2. “Classify audio clip at chosen index” menu option will run inference on the chosen audio clip.
+
+    > **Note:** Please make sure to select an audio clip index within the range of audio clips supplied during the application
+    build. By default, the pre-built application has 4 files, with indexes from 0 to 3.
+
+3. “Run classification on all audio clips” menu option triggers sequential inference executions on all built-in audio clips.
+
+4. “Show NN model info” menu option prints information about model data type, input and output tensor sizes:
+
+    ```log
+    [INFO] uTFL version: 2.5.0
+    [INFO] Model info:
+    [INFO] Model INPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 1024 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:  32
+    [INFO] 		2:  32
+    [INFO] 		3:   1
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.192437
+    [INFO] ZeroPoint[0] = 11
+    [INFO] Model OUTPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 8 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:   8
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.048891
+    [INFO] ZeroPoint[0] = -30
+    [INFO] Activation buffer (a.k.a tensor arena) size used: 198016
+    [INFO] Number of operators: 1
+    [INFO] 	Operator 0: ethos-u
+    [INFO] Use of Arm uNPU is enabled
+
+    ```
+
+5. “List audio clips” menu option prints a list of audio clip indexes paired with the original filenames embedded in the application:
+
+    ```log
+    [INFO] List of Files:
+    [INFO] 0 => anomaly_id_00_00000000.wav
+    [INFO] 1 => anomaly_id_02_00000076.wav
+    [INFO] 2 => normal_id_00_00000004.wav
+    [INFO] 3 => normal_id_02_00000001.wav
+    ```
+
+### Running Anomaly Detection
+
+Please select the first menu option to execute Anomaly Detection.
+
+The following example illustrates application output:
+
+```log
+[INFO] Running inference on audio clip 0 => anomaly_id_00_00000000.wav
+[INFO] Inference 1/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1081154
+	Idle NPU cycles:   1012
+
+[INFO] Inference 2/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080934
+	Idle NPU cycles:   232
+
+[INFO] Inference 3/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1081332
+	Idle NPU cycles:   834
+
+[INFO] Inference 4/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080748
+	Idle NPU cycles:   418
+
+[INFO] Inference 5/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080728
+	Idle NPU cycles:   438
+
+[INFO] Inference 6/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1081144
+	Idle NPU cycles:   1022
+
+[INFO] Inference 7/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080924
+	Idle NPU cycles:   242
+
+[INFO] Inference 8/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1081322
+	Idle NPU cycles:   844
+
+[INFO] Inference 9/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080738
+	Idle NPU cycles:   428
+
+[INFO] Inference 10/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080718
+	Idle NPU cycles:   448
+
+[INFO] Inference 11/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1081134
+	Idle NPU cycles:   1032
+
+[INFO] Inference 12/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080914
+	Idle NPU cycles:   252
+
+[INFO] Inference 13/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1081312
+	Idle NPU cycles:   854
+
+[INFO] Average anomaly score is: -0.024493
+Anomaly threshold is: -0.800000
+Anomaly detected!
+
+```
+
+As multiple inferences have to be run for one clip, it will take around a minute or so for all inferences to complete.
+
+For the anomaly_id_00_00000000.wav clip, the score averaged across all inferences (-0.024493) is greater than the chosen
+anomaly threshold (-0.800000), so an anomaly was detected for the machine in this clip.
+
+The profiling section of the log shows the cycle counts reported for each inference. For the last inference the profiling reports:
+
+- Ethos-U55's PMU report:
+
+  - 1,081,312 active cycles: number of cycles that were used for computation
+
+  - 854 idle cycles: number of cycles for which the NPU was idle
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+    the CPU model is not cycle-approximate or cycle-accurate.
diff --git a/docs/use_cases/asr.md b/docs/use_cases/asr.md
new file mode 100644
index 0000000..d224aca
--- /dev/null
+++ b/docs/use_cases/asr.md
@@ -0,0 +1,529 @@
+# Automatic Speech Recognition Code Sample
+
+- [Introduction](#introduction)
+  - [Prerequisites](#prerequisites)
+- [Building the code sample application from sources](#building-the-code-sample-application-from-sources)
+  - [Build options](#build-options)
+  - [Build process](#build-process)
+  - [Add custom input](#add-custom-input)
+  - [Add custom model](#add-custom-model)
+- [Setting-up and running Ethos-U55 Code Sample](#setting-up-and-running-ethos-u55-code-sample)
+  - [Setting up the Ethos-U55 Fast Model](#setting-up-the-ethos-u55-fast-model)
+  - [Starting Fast Model simulation](#starting-fast-model-simulation)
+  - [Running Automatic Speech Recognition](#running-automatic-speech-recognition)
+- [Automatic Speech Recognition processing information](#automatic-speech-recognition-processing-information)
+  - [Preprocessing and feature extraction](#preprocessing-and-feature-extraction)
+  - [Postprocessing](#postprocessing)
+
+## Introduction
+
+This document describes the process of setting up and running the Arm® Ethos™-U55 Automatic Speech Recognition example.
+
+The use case code can be found in the [source/use_case/asr](../../source/use_case/asr) directory.
+
+### Preprocessing and feature extraction
+
+The wav2letter automatic speech recognition model that is used with the Code Samples expects audio data to be
+preprocessed in a specific way before performing an inference. This section aims to provide an overview of the feature
+extraction process used.
+
+First the audio data is normalized to the range (-1, 1).
+
+> **Note:** Mel-frequency cepstral coefficients (MFCCs) are a common feature extracted from audio data and can be used as
+>input for machine learning tasks like keyword spotting and speech recognition. See source/application/main/include/Mfcc.hpp
+>for implementation details.
+
+Next, a window of 512 audio samples is taken from the start of the audio clip. From these 512 samples we calculate 13
+MFCC features.
+
+The whole window is shifted to the right by 160 audio samples and 13 new MFCC features are calculated. This process of
+shifting and calculating is repeated until enough audio samples to perform an inference have been processed. In total
+this will be 296 windows that each have 13 MFCC features calculated for them.
+
+After extracting MFCC features the first and second order derivatives of these features with respect to time are
+calculated. These derivative features are then standardized and concatenated with the MFCC features (which also get
+standardized). At this point the input tensor will have a shape of 296x39.
+
+These extracted features are quantized, and an inference is performed.
+
+![ASR preprocessing](../media/ASR_preprocessing.png)
+
+For longer audio clips where multiple inferences need to be performed, then the initial starting position is offset by
+(100*160) = 16000 audio samples. From this new starting point, MFCC and derivative features are calculated as before
+until there is enough to perform another inference. Padding can be used if there are not enough audio samples for at
+least 1 inference. This step is repeated until the whole audio clip has been processed. If there are not enough audio
+samples for a final complete inference the MFCC features will be padded by repeating the last calculated feature until
+an inference can be performed.
+
+> **Note:** Parameters of the MFCC feature extraction such as window size, stride, number of features etc. all depend on
+>what was used during model training. These values are specific to each model. If you switch to a different ASR model
+>than the one supplied, then the feature extraction process could be completely different to the one currently implemented.
+
+The amount of audio samples we offset by for long audio clips is specific to the included wav2letter model.
+
+### Postprocessing
+
+After performing an inference, the raw output needs to be postprocessed to get a usable result.
+
+The raw output from the model is a tensor of shape 148x29, where each row is a probability distribution over the 29 possible
+characters that can appear at each of the 148 time steps.
+
+This wav2letter model is trained using context windows. This means that only certain parts of the output are usable,
+depending on the part of the audio clip that is currently being processed.
+
+If this is the first inference and multiple inferences are required, then ignore the final 49 rows of the output.
+Similarly, if this is the final inference from multiple inferences then ignore the first 49 rows of the output. Finally,
+if this inference is not the last or first inference then ignore the first and last 49 rows of the model output.
+
+> **Note:** If the audio clip is small enough then the whole of the model output is usable and there is no need to throw
+>away any of the output before continuing.
+
+Once any rows have been removed, the final processing can be done. To process the output, first the letter with the
+highest probability at each time step is found. Next, any letters that are repeated multiple times in a row are removed
+(e.g. [t, t, t, o, p, p] becomes [t, o, p]). Finally, the 29th blank token letter is removed from the output.
+
+For the final output, the results from all inferences are combined before decoding. What you are left with is then
+displayed to the console.
+
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the code sample application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the Automatic Speech Recognition use case
+adds:
+
+- `asr_MODEL_TFLITE_PATH` - Path to the NN model file in TFLite format. Model will be processed and included into the
+application axf file. The default value points to one of the delivered set of models. Note that the parameters
+`asr_LABELS_TXT_FILE`,`TARGET_PLATFORM` and `ETHOS_U55_ENABLED` should be aligned with the chosen model, i.e.:
+  - if `ETHOS_U55_ENABLED` is set to `On` or `1`, the NN model is assumed to be optimized. The model will naturally
+fall back to the Arm® Cortex®-M CPU if an unoptimized model is supplied.
+  - if `ETHOS_U55_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized
+model in this case will result in a runtime error.
+
+- `asr_FILE_PATH`:  Path to the directory containing audio files, or a path to single WAV file, to be used in the
+    application. The default value points
+    to the resources/asr/samples folder containing the delivered set of audio clips.
+
+- `asr_LABELS_TXT_FILE`: Path to the labels' text file. The file is used to map letter class index to the text label.
+    The default value points to the delivered labels.txt file inside the delivery package.
+
+- `asr_AUDIO_RATE`: Input data sampling rate. Each audio file from asr_FILE_PATH is preprocessed during the build to
+    match NN model input requirements. Default value is 16000.
+
+- `asr_AUDIO_MONO`: If set to ON the audio data will be converted to mono. Default is ON.
+
+- `asr_AUDIO_OFFSET`: Start loading audio data starting from this offset (in seconds). Default value is 0.
+
+- `asr_AUDIO_DURATION`: Length of the audio data to be used in the application in seconds. Default is 0 meaning the
+    whole audio file will be taken.
+
+- `asr_AUDIO_MIN_SAMPLES`: Minimum number of samples required by the network model. If the audio clip is shorter than
+    this number, it is padded with zeros. Default value is 16000.
+
+- `asr_MODEL_SCORE_THRESHOLD`: Threshold value that must be applied to the inference results for a label to be
+    deemed valid. Default is 0.5.
+
+- `asr_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By default, it is set
+    to 2MiB and should be enough for most models.
+
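+For example, a configuration that overrides some of the audio options listed above might look like the following (a sketch
+only; the values shown are illustrative and must match your own recordings and model):
+
+```commandline
+# Illustrative values only; use settings that match your recordings and the model's input requirements
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -Dasr_AUDIO_RATE=16000 \
+    -Dasr_AUDIO_OFFSET=1 \
+    -Dasr_AUDIO_DURATION=10 \
+    -DUSE_CASE_BUILD=asr ..
+```
+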
+To build **only** the automatic speech recognition example application, add `-DUSE_CASE_BUILD=asr` to the `cmake` command line
+specified in [Building](../documentation.md#Building).
+
+### Build process
+
+> **Note:** This section describes the process for configuring the build for the `MPS3: SSE-300` target platform. To configure
+>the build for a different target platform, see the [Building](../documentation.md#Building) section.
+
+In order to build **only** the automatic speech recognition example, create a build directory and navigate inside:
+
+```commandline
+mkdir build_asr && cd build_asr
+```
+
+On Linux, execute the following command to build **only** Automatic Speech Recognition application to run on the
+Ethos-U55 Fast Model when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+Toolchain option `CMAKE_TOOLCHAIN_FILE` points to the toolchain specific file to set the compiler and platform specific
+parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Modeldebugger for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+> **Note:** If building for different Ethos-U55 configurations, see
+>[Configuring build for different Arm Ethos-U55 configurations](../sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations):
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver is not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` can be used to configure the location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+The `CMSIS_SRC_PATH` parameter can also be used to override the CMSIS sources that TensorFlow uses by default. For example,
+to use the CMSIS sources fetched by the ethos-u helper script, we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=../ethos-u/core_software/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=../ethos-u/core_software/core_driver \
+    -DCMSIS_SRC_PATH=../ethos-u/core_software/cmsis \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run
+>the CMake command.
+
+If the CMake command succeeded, build the application as follows:
+
+```commandline
+make -j4
+```
+
+For Windows, use `mingw32-make`.
+
+Add `VERBOSE=1` to see compilation and link details.
+
+Results of the build will be placed under `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-asr.axf
+ ├── ethos-u-asr.htm
+ ├── ethos-u-asr.map
+ ├── images-asr.txt
+ └── sectors
+      └── asr
+          ├── dram.bin
+          └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-asr.axf`: The built application binary for the Automatic Speech Recognition use case.
+
+- `ethos-u-asr.map`: Information from building the application (e.g. libraries used, what was optimized, location of
+    objects)
+
+- `ethos-u-asr.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `Images-asr.txt`: Tells the FPGA which memory regions to use for loading the binaries in sectors/** folder.
+
+### Add custom input
+
+The application performs inference on audio data found in the folder, or an individual file, set by the CMake parameter
+`asr_FILE_PATH`.
+
+To run the application with your own audio clips first create a folder to hold them and then copy the custom audio clips
+into this folder:
+
+```commandline
+mkdir /tmp/custom_wavs
+
+cp my_clip.wav /tmp/custom_wavs/
+```
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+Next set `asr_FILE_PATH` to the location of this folder when building:
+
+```commandline
+cmake \
+    -Dasr_FILE_PATH=/tmp/custom_wavs/ \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DUSE_CASE_BUILD=asr \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+The audio clips found in the `asr_FILE_PATH` folder will be picked up and automatically converted to C++ files during the
+CMake configuration stage and then compiled into the application during the build phase for performing inference with.
+
+The log from the configuration stage should tell you what audio clip directory path has been used:
+
+```log
+-- User option asr_FILE_PATH is set to /tmp/custom_wavs
+-- Generating audio files from /tmp/custom_wavs
+++ Converting my_clip.wav to my_clip.cc
+++ Generating build/generated/asr/include/InputFiles.hpp
+++ Generating build/generated/asr/src/InputFiles.cc
+-- Defined build user options:
+-- asr_FILE_PATH=/tmp/custom_wavs
+```
+
+After compiling, your custom inputs will have now replaced the default ones in the application.
+
+> **Note:** The CMake parameter `asr_AUDIO_MIN_SAMPLES` determines the minimum number of input samples. When building the
+>application, if an audio clip is shorter than `asr_AUDIO_MIN_SAMPLES` then it will be padded so that it meets the minimum.
+
+### Add custom model
+
+The application performs inference using the model pointed to by the CMake parameter `asr_MODEL_TFLITE_PATH`.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom model has been run through the Vela
+>compiler successfully before continuing. See [Optimize model with Vela compiler](../sections/building.md#Optimize-custom-model-with-Vela-compiler).
+
+To run the application with a custom model you will need to provide a `labels_<model_name>.txt` file of labels
+associated with the model. Each line of the file should correspond to one of the outputs in your model. See the provided
+`labels_wav2letter.txt` file for an example.
+
+Then, you must set `asr_MODEL_TFLITE_PATH` to the location of the Vela processed model file and `asr_LABELS_TXT_FILE` to
+the location of the associated labels file.
+
+An example:
+
+```commandline
+cmake \
+    -Dasr_MODEL_TFLITE_PATH=<path/to/custom_model_after_vela.tflite> \
+    -Dasr_LABELS_TXT_FILE=<path/to/labels_custom_model.txt> \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The `.tflite` model file pointed to by `asr_MODEL_TFLITE_PATH` and labels text file pointed to by `asr_LABELS_TXT_FILE`
+will be converted to C++ files during the CMake configuration stage and then compiled into the application for performing
+inference with.
+
+The log from the configuration stage should tell you what model path and labels file have been used:
+
+```log
+-- User option TARGET_PLATFORM is set to mps3
+-- User option asr_MODEL_TFLITE_PATH is set to <path/to/custom_model_after_vela.tflite>
+...
+-- User option asr_LABELS_TXT_FILE is set to <path/to/labels_custom_model.txt>
+...
+-- Using <path/to/custom_model_after_vela.tflite>
+++ Converting custom_model_after_vela.tflite to\
+custom_model_after_vela.tflite.cc
+-- Generating labels file from <path/to/labels_custom_model.txt>
+-- writing to <path/to/build/generated/src/Labels.cc>
+...
+```
+
+After compiling, your custom model will have now replaced the default one in the application.
+
+## Setting-up and running Ethos-U55 Code Sample
+
+### Setting up the Ethos-U55 Fast Model
+
+The FVP is available publicly from [Arm Ecosystem FVP downloads
+](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For Ethos-U55 evaluation, please download the MPS3 version of the Arm® Corstone™-300 model that contains Ethos-U55 and
+Cortex-M55. The model is currently only supported on Linux based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+./FVP_Corstone_SSE-300_Ethos-U55.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+Once the build step is completed, the application binary `ethos-u-asr.axf` can be found in the `build/bin` folder.
+Assuming the install location of the FVP was set to ~/FVP_install_location, the simulation can be started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 ./bin/mps3-sse-300/ethos-u-asr.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error log entries containing
+information about the pre-built application version, TensorFlow Lite Micro library version used, data type as well as
+the input and output tensor sizes of the model compiled into the executable binary.
+
+After the application has started, if `asr_FILE_PATH` points to a single file (or a folder containing a single input file) the
+inference starts immediately. If there are multiple inputs, the application outputs a menu and waits for user input from the
+telnet terminal:
+
+```log
+User input required
+Enter option number from:
+
+1. Classify next audio clip
+2. Classify audio clip at chosen index
+3. Run classification on all audio clips
+4. Show NN model info
+5. List audio clips
+
+Choice:
+
+```
+
+1. “Classify next audio clip” menu option will run inference on the next voice clip in line from the collection of the
+    compiled audio clips.
+
+    > **Note:** If the clip is over a certain length, the application will invoke multiple inference runs to cover the
+    entire file.
+
+2. “Classify audio clip at chosen index” menu option will run inference on the chosen audio clip.
+
+    > **Note:** Please make sure to select an audio clip index within the range of audio clips supplied during the application
+    build. By default, the pre-built application has 4 files, with indexes from 0 to 3.
+
+3. “Run classification on all audio clips” menu option triggers sequential inference executions on all built-in voice
+    samples.
+
+4. “Show NN model info” menu option prints information about model data type, input and output tensor sizes:
+
+    ```log
+    [INFO] uTFL version: 2.5.0
+    [INFO] Model info:
+    [INFO] Model INPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 11544 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1: 296
+    [INFO] 		2:  39
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.110316
+    [INFO] ZeroPoint[0] = -11
+    [INFO] Model OUTPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 4292 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:   1
+    [INFO] 		2: 148
+    [INFO] 		3:  29
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.003906
+    [INFO] ZeroPoint[0] = -128
+    [INFO] Activation buffer (a.k.a tensor arena) size used: 783168
+    [INFO] Number of operators: 1
+    [INFO] 	Operator 0: ethos-u
+    [INFO] Use of Arm uNPU is enabled
+    ```
+
+5. “List audio clips” menu option prints a list of audio clip indexes paired with the original filenames embedded in the application:
+
+    ```log
+    [INFO] List of Files:
+    [INFO] 0 => anotherdoor.wav
+    [INFO] 1 => anotherengineer.wav
+    [INFO] 2 => itellyou.wav
+    [INFO] 3 => testingroutine.wav
+    ```
+
+### Running Automatic Speech Recognition
+
+Please select the first menu option to execute Automatic Speech Recognition.
+
+The following example illustrates application output:
+
+```log
+[INFO] Running inference on audio clip 0 => anotherdoor.wav
+[INFO] Inference 1/2
+[INFO] Profile for pre-processing:
+	Active NPU cycles: 0
+	Idle NPU cycles:   6
+
+[INFO] Profile for Inference:
+	Active NPU cycles: 28924342
+	Idle NPU cycles:   824
+
+[INFO] Inference 2/2
+[INFO] Profile for pre-processing:
+	Active NPU cycles: 0
+	Idle NPU cycles:   6
+
+[INFO] Profile for Inference:
+	Active NPU cycles: 28924298
+	Idle NPU cycles:   868
+
+[INFO] Result for inf 0: and he walked immediately out o t
+[INFO] Result for inf 1: he aparctment by anoer dor
+[INFO] Final result: and he walked immediately out o the aparctment by anoer dor
+```
+
+It could take several minutes to complete each inference (average time is 5-7 minutes), and on this audio clip multiple
+inferences were required to cover the whole clip.
+
+The profiling section of the log shows that for the final inference:
+
+- Ethos-U55's PMU report:
+
+  - 28,924,298 active cycles: number of NPU cycles that were used for computation
+
+  - 868 idle cycles: number of cycles for which the NPU was idle
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+    the CPU model is not cycle-approximate or cycle-accurate.
+
+The application prints the decoded output from each of the inference runs as well as the final combined result.
diff --git a/docs/use_cases/img_class.md b/docs/use_cases/img_class.md
new file mode 100644
index 0000000..7a409f2
--- /dev/null
+++ b/docs/use_cases/img_class.md
@@ -0,0 +1,446 @@
+# Image Classification Code Sample
+
+- [Introduction](#introduction)
+  - [Prerequisites](#prerequisites)
+- [Building the code sample application from sources](#building-the-code-sample-application-from-sources)
+  - [Build options](#build-options)
+  - [Build process](#build-process)
+  - [Add custom input](#add-custom-input)
+  - [Add custom model](#add-custom-model)
+- [Setting-up and running Ethos-U55 code sample](#setting-up-and-running-ethos-u55-code-sample)
+  - [Setting up the Ethos-U55 Fast Model](#setting-up-the-ethos-u55-fast-model)
+  - [Starting Fast Model simulation](#starting-fast-model-simulation)
+  - [Running Image Classification](#running-image-classification)
+
+## Introduction
+
+This document describes the process of setting up and running the Arm® Ethos™-U55 Image Classification
+example.
+
+This use case solves the classical computer vision problem of image classification. The ML sample was developed using the
+MobileNet v2 model trained on the ImageNet dataset.
+
+The use case code can be found in the [source/use_case/img_class](../../source/use_case/img_class) directory.
+
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the code sample application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the Image Classification use case specifies:
+
+- `img_class_MODEL_TFLITE_PATH` - Path to the NN model file in TFLite format. Model will be processed and included into
+    the application axf file. The default value points to one of the delivered set of models. Note that the parameters
+    `img_class_LABELS_TXT_FILE`,`TARGET_PLATFORM` and `ETHOS_U55_ENABLED` should be aligned with the chosen model, i.e.:
+  - if `ETHOS_U55_ENABLED` is set to `On` or `1`, the NN model is assumed to be optimized. The model will naturally
+    fall back to the Arm® Cortex®-M CPU if an unoptimized model is supplied.
+  - if `ETHOS_U55_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized
+    model in this case will result in a runtime error.
+
+- `img_class_FILE_PATH`: Path to the directory containing images, or path to a single image file, to be used in the
+    application. The default value points to the resources/img_class/samples folder containing the delivered
+    set of images. See more in the [Add custom input data section](#add-custom-input).
+
+- `img_class_IMAGE_SIZE`: The NN model requires input images to be of a specific size. This parameter defines the
+    size of the image side in pixels. Images are considered squared. Default value is 224, which is what the supplied
+    MobilenetV2-1.0 model expects.
+
+- `img_class_LABELS_TXT_FILE`: Path to the labels' text file to be baked into the application. The file is used to
+    map classified classes index to the text label. Change this parameter to point to the custom labels file to map
+    custom NN model output correctly.\
+    The default value points to the delivered labels.txt file inside the delivery package.
+
+- `img_class_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By default, it
+    is set to 2MiB and should be enough for most models.
+
+- `USE_CASE_BUILD`: set to img_class to build only this example.
+
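+For example, a configuration that points the build at a custom labels file and a specific input image size might look like
+the following (a sketch only; the placeholder path and the value of `img_class_IMAGE_SIZE` must match the model actually used):
+
+```commandline
+# Replace the placeholder path; the image size must match the custom model's input size
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -Dimg_class_IMAGE_SIZE=224 \
+    -Dimg_class_LABELS_TXT_FILE=<path/to/labels_custom_model.txt> \
+    -DUSE_CASE_BUILD=img_class ..
+```
+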
+To build **only** the Image Classification example application, add `-DUSE_CASE_BUILD=img_class` to the `cmake` command line
+specified in [Building](../documentation.md#Building).
+
+### Build process
+
+> **Note:** This section describes the process for configuring the build for the `MPS3: SSE-300` target platform. To configure
+>the build for a different target platform, see the [Building](../documentation.md#Building) section.
+
+Create a build directory folder and navigate inside:
+
+```commandline
+mkdir build_img_class && cd build_img_class
+```
+
+On Linux, execute the following command to build **only** Image Classification application to run on the Ethos-U55 Fast
+Model when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+Toolchain option `CMAKE_TOOLCHAIN_FILE` points to the toolchain specific file to set the compiler and platform specific
+parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Modeldebugger for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+> **Note:** If building for different Ethos-U55 configurations, see
+>[Configuring build for different Arm Ethos-U55 configurations](../sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations):
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver is not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` can be used to configure the location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+The `CMSIS_SRC_PATH` parameter can also be used to override the CMSIS sources that TensorFlow uses by default. For example,
+to use the CMSIS sources fetched by the ethos-u helper script, we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=../ethos-u/core_software/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=../ethos-u/core_software/core_driver \
+    -DCMSIS_SRC_PATH=../ethos-u/core_software/cmsis \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run
+>the CMake command.
+
+If the CMake command succeeds, build the application as follows:
+
+```commandline
+make -j4
+```
+
+For Windows, use `mingw32-make`.
+
+Add `VERBOSE=1` to see compilation and link details.
+
+Results of the build will be placed under `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-img_class.axf
+ ├── ethos-u-img_class.htm
+ ├── ethos-u-img_class.map
+ ├── images-img_class.txt
+ └── sectors
+      └── img_class
+           ├── dram.bin
+           └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-img_class.axf`: The built application binary for the Image Classification use case.
+
+- `ethos-u-img_class.map`: Information from building the application (e.g. libraries used, what was optimized, location
+    of objects)
+
+- `ethos-u-img_class.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `Images-img_class.txt`: Tells the FPGA which memory regions to use for loading the binaries in sectors/** folder.
+
+### Add custom input
+
+The application performs inference on input data found in the folder, or an individual file, set by the CMake parameter
+`img_class_FILE_PATH`.
+
+To run the application with your own images, first create a folder to hold them and then copy the custom images into
+this folder, for example:
+
+```commandline
+mkdir /tmp/custom_images
+
+cp custom_image1.bmp /tmp/custom_images/
+```
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+Next set `img_class_FILE_PATH` to the location of this folder when building:
+
+```commandline
+cmake \
+    -Dimg_class_FILE_PATH=/tmp/custom_images/ \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+The images found in the `img_class_FILE_PATH` folder will be picked up and automatically converted to C++ files during
+the CMake configuration stage and then compiled into the application during the build phase for performing inference
+with.
+
+The log from the configuration stage should tell you what image directory path has been used:
+
+```log
+-- User option img_class_FILE_PATH is set to /tmp/custom_images
+-- User option img_class_IMAGE_SIZE is set to 224
+...
+-- Generating image files from /tmp/custom_images
+++ Converting custom_image1.bmp to custom_image1.cc
+...
+-- Defined build user options:
+...
+-- img_class_FILE_PATH=/tmp/custom_images
+-- img_class_IMAGE_SIZE=224
+```
+
+After compiling, your custom images will have now replaced the default ones in the application.
+
+> **Note:** The CMake parameter `img_class_IMAGE_SIZE` should match the model input size. When building the application,
+if the size of any image does not match `img_class_IMAGE_SIZE` then it will be rescaled and padded so that it does.
+
+### Add custom model
+
+The application performs inference using the model pointed to by the CMake parameter `img_class_MODEL_TFLITE_PATH`.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom model has been run through the Vela compiler
+>successfully before continuing. See [Optimize model with Vela compiler](../sections/building.md#Optimize-custom-model-with-Vela-compiler).
+
+To run the application with a custom model you will need to provide a `labels_<model_name>.txt` file of labels
+associated with the model. Each line of the file should correspond to one of the outputs in your model. See the provided
+`labels_mobilenet_v2_1.0_224.txt` file for an example.
+
+Then, you must set `img_class_MODEL_TFLITE_PATH` to the location of the Vela processed model file and
+`img_class_LABELS_TXT_FILE` to the location of the associated labels file.
+
+An example:
+
+```commandline
+cmake \
+    -Dimg_class_MODEL_TFLITE_PATH=<path/to/custom_model_after_vela.tflite> \
+    -Dimg_class_LABELS_TXT_FILE=<path/to/labels_custom_model.txt> \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The `.tflite` model file pointed to by `img_class_MODEL_TFLITE_PATH` and labels text file pointed to by
+`img_class_LABELS_TXT_FILE` will be converted to C++ files during the CMake configuration stage and then compiled into
+the application for performing inference with.
+
+The log from the configuration stage should tell you what model path and labels file have been used:
+
+```log
+-- User option img_class_MODEL_TFLITE_PATH is set to <path/to/custom_model_after_vela.tflite>
+...
+-- User option img_class_LABELS_TXT_FILE is set to <path/to/labels_custom_model.txt>
+...
+-- Using <path/to/custom_model_after_vela.tflite>
+++ Converting custom_model_after_vela.tflite to\
+custom_model_after_vela.tflite.cc
+-- Generating labels file from <path/to/labels_custom_model.txt>
+-- writing to <path/to/build/generated/src/Labels.cc>
+...
+```
+
+After compiling, your custom model will have now replaced the default one in the application.
+
+## Setting-up and running Ethos-U55 code sample
+
+### Setting up the Ethos-U55 Fast Model
+
+The FVP is available publicly from [Arm Ecosystem FVP downloads](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For Ethos-U55 evaluation, please download the MPS3 version of the Arm® Corstone™-300 model that contains Ethos-U55 and
+Cortex-M55. The model is currently only supported on Linux based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+./FVP_Corstone_SSE-300_Ethos-U55.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+Pre-built application binary ethos-u-img_class.axf can be found in the bin/mps3-sse-300 folder of the delivery package.
+Assuming the install location of the FVP was set to ~/FVP_install_location, the simulation can be started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 ./bin/mps3-sse-300/ethos-u-img_class.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error log entries containing
+information about the pre-built application version, TensorFlow Lite Micro library version used, data type as well as
+the input and output tensor sizes of the model compiled into the executable binary.
+
+After the application has started, if `img_class_FILE_PATH` points to a single file (or a folder containing a single image)
+the inference starts immediately. If there are multiple inputs, the application outputs a menu and waits for user input from
+the telnet terminal:
+
+```log
+User input required
+Enter option number from:
+
+1. Classify next image
+2. Classify image at chosen index
+3. Run classification on all images
+4. Show NN model info
+5. List images
+
+Choice:
+
+```
+
+1. “Classify next image” menu option will run single inference on the next in line image from the collection of the
+    compiled images.
+
+2. “Classify image at chosen index” menu option will run single inference on the chosen image.
+
+    > **Note:** Please make sure to select an image index within the range of images supplied during the application build.
+    By default, the pre-built application has 4 images, with indexes from 0 to 3.
+
+3. “Run classification on all images” menu option triggers sequential inference executions on all built-in images.
+
+4. “Show NN model info” menu option prints information about model data type, input and output tensor sizes:
+
+    ```log
+    [INFO] uTFL version: 2.5.0
+    [INFO] Model info:
+    [INFO] Model INPUT tensors:
+    [INFO] 	tensor type is UINT8
+    [INFO] 	tensor occupies 150528 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1: 224
+    [INFO] 		2: 224
+    [INFO] 		3:   3
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.007812
+    [INFO] ZeroPoint[0] = 128
+    [INFO] Model OUTPUT tensors:
+    [INFO] 	tensor type is UINT8
+    [INFO] 	tensor occupies 1001 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1: 1001
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.098893
+    [INFO] ZeroPoint[0] = 58
+    [INFO] Activation buffer (a.k.a tensor arena) size used: 521760
+    [INFO] Number of operators: 1
+    [INFO] 	Operator 0: ethos-u
+    [INFO] Use of Arm uNPU is enabled
+    ```
+
+5. “List images” menu option prints a list of image indexes paired with the original filenames embedded in the application:
+
+    ```log
+    [INFO] List of Files:
+    [INFO] 0 => cat.bmp
+    [INFO] 1 => dog.bmp
+    [INFO] 2 => kimono.bmp
+    [INFO] 3 => tiger.bmp
+    ```
+
+### Running Image Classification
+
+Please select the first menu option to execute Image Classification.
+
+The following example illustrates application output for classification:
+
+```log
+[INFO] Running inference on image 0 => cat.bmp
+[INFO] Profile for Inference:
+	Active NPU cycles: 7622641
+	Idle NPU cycles:   525
+
+[INFO] 0) 282 (14.636096) -> tabby, tabby cat
+[INFO] 1) 286 (14.537203) -> Egyptian cat
+[INFO] 2) 283 (12.757138) -> tiger cat
+[INFO] 3) 458 (7.021370) -> bow tie, bow-tie, bowtie
+[INFO] 4) 288 (7.021370) -> lynx, catamount
+```
+
+It could take several minutes to complete one inference run (average time is 2-3 minutes).
+
+The log shows the inference results for “image 0” (index 0), which corresponds to “cat.bmp” in the sample image resource
+folder.
+
+The profiling section of the log shows that for this inference:
+
+- Ethos-U55's PMU report:
+
+  - 7,622,641 active cycles: number of NPU cycles that were used for computation
+
+  - 525 idle cycles: number of cycles for which the NPU was idle
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+    the CPU model is not cycle-approximate or cycle-accurate.
+
+The application prints the top 5 classes with their indexes, confidence scores and labels from the associated
+labels_mobilenet_v2_1.0_224.txt file. The FVP window also shows the output on its LCD section.
diff --git a/docs/use_cases/inference_runner.md b/docs/use_cases/inference_runner.md
new file mode 100644
index 0000000..ffb205e
--- /dev/null
+++ b/docs/use_cases/inference_runner.md
@@ -0,0 +1,296 @@
+# Inference Runner Code Sample
+
+- [Introduction](#introduction)
+  - [Prerequisites](#prerequisites)
+- [Building the Code Samples application from sources](#building-the-code-samples-application-from-sources)
+  - [Build options](#build-options)
+  - [Build process](#build-process)
+  - [Add custom model](#add-custom-model)
+- [Setting-up and running Ethos-U55 code sample](#setting-up-and-running-ethos-u55-code-sample)
+  - [Setting up the Ethos-U55 Fast Model](#setting-up-the-ethos-u55-fast-model)
+  - [Starting Fast Model simulation](#starting-fast-model-simulation)
+  - [Running Inference Runner](#running-inference-runner)
+- [Inference Runner processing information](#inference-runner-processing-information)
+
+## Introduction
+
+This document describes the process of setting up and running the Arm® Ethos™-U55 NPU Inference Runner.
+The inference runner is intended for quickly checking profiling results for any desired network, providing it has been
+processed by the Vela compiler.
+
+A simple model is provided with the Inference Runner as an example, but it is expected that the user will replace this
+model with one they wish to profile, see [Add custom model](./inference_runner.md#Add-custom-model) for more details.
+
+The inference runner will populate all input tensors for the provided model with randomly generated data and an
+inference is then performed. Profiling results are then displayed in the console.
+
+The use case code can be found in the [source/use_case/inference_runner](../../source/use_case/inference_runner) directory.
+
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the Code Samples application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the Inference Runner use case adds:
+
+- `inference_runner_MODEL_TFLITE_PATH` - Path to the NN model file in TFLite format. Model will be processed and
+  included into the application axf file. The default value points to one of the delivered set of models.
+  Note that the parameters `TARGET_PLATFORM` and `ETHOS_U55_ENABLED` should be aligned with the chosen model, i.e.:
+  - if `ETHOS_U55_ENABLED` is set to `On` or `1`, the NN model is assumed to be optimized. The model will naturally
+    fall back to the Arm® Cortex®-M CPU if an unoptimized model is supplied.
+  - if `ETHOS_U55_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized model
+    in this case will result in a runtime error.
+
+- `inference_runner_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By
+    default, it is set to 2MiB and should be enough for most models.
+
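+For example, if the model you want to profile needs a larger activation buffer than the 2MiB default, the size can be
+overridden at configuration time (a sketch only; the value is assumed to be given in bytes, here 4MiB in hexadecimal):
+
+```commandline
+# Assumed to be a byte count: 0x00400000 = 4MiB
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -Dinference_runner_ACTIVATION_BUF_SZ=0x00400000 \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+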
+To build **only** the Inference Runner example application, add `-DUSE_CASE_BUILD=inference_runner` to the `cmake` command line
+specified in [Building](../documentation.md#Building).
+
+### Build process
+
+> **Note:** This section describes the process for configuring the build for the `MPS3: SSE-300` target platform. To configure
+>the build for a different target platform, see the [Building](../documentation.md#Building) section.
+
+Create a build directory and navigate inside:
+
+```commandline
+mkdir build_inference_runner && cd build_inference_runner
+```
+
+On Linux, execute the following command to build **only** Inference Runner application to run on the Ethos-U55 Fast
+Model when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+
+Toolchain option `CMAKE_TOOLCHAIN_FILE` points to the toolchain specific file to set the compiler and platform specific
+parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Modeldebugger for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+
+> **Note:** If building for different Ethos-U55 configurations, see
+>[Configuring build for different Arm Ethos-U55 configurations](../sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations):
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver is not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` can be used to configure the location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+
+The `CMSIS_SRC_PATH` parameter can also be used to override the CMSIS sources that TensorFlow uses by default. For example,
+to use the CMSIS sources fetched by the ethos-u helper script, we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=../ethos-u/core_software/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=../ethos-u/core_software/core_driver \
+    -DCMSIS_SRC_PATH=../ethos-u/core_software/cmsis \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run
+>the CMake command.
+
+If the CMake command succeeded, build the application as follows:
+
+```commandline
+make -j4
+```
+
+For Windows, use `mingw32-make`.
+
+Add `VERBOSE=1` to see compilation and link details.
+
+Results of the build will be placed under `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-inference_runner.axf
+ ├── ethos-u-inference_runner.htm
+ ├── ethos-u-inference_runner.map
+ ├── images-inference_runner.txt
+ └── sectors
+      ├── kws
+      │ └── ...
+      └── img_class
+        ├── dram.bin
+        └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-inference_runner.axf`: The built application binary for the Inference Runner use case.
+
+- `ethos-u-inference_runner.map`: Information from building the application (e.g. libraries used, what was optimized,
+    location of objects)
+
+- `ethos-u-inference_runner.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `images-inference_runner.txt`: Tells the FPGA which memory regions to use for loading the binaries in the `sectors/`
+    folder.
+
+### Add custom model
+
+The application performs inference using the model pointed to by the CMake parameter `inference_runner_MODEL_TFLITE_PATH`.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom model has been run through the Vela compiler
+>successfully before continuing. See [Optimize model with Vela compiler](../sections/building.md#Optimize-custom-model-with-Vela-compiler).
+
+Then, you must set `inference_runner_MODEL_TFLITE_PATH` to the location of the Vela processed model file.
+
+An example:
+
+```commandline
+cmake \
+  -Dinference_runner_MODEL_TFLITE_PATH=<path/to/custom_model_after_vela.tflite> \
+  -DTARGET_PLATFORM=mps3 \
+  -DTARGET_SUBSYSTEM=sse-300 \
+  -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The `.tflite` model file pointed to by `inference_runner_MODEL_TFLITE_PATH` will be converted to C++ files during the CMake
+configuration stage and then compiled into the application for performing inference with.
+
+The log from the configuration stage should tell you what model path has been used:
+
+```stdout
+-- User option inference_runner_MODEL_TFLITE_PATH is set to <path/to/custom_model_after_vela.tflite>
+...
+-- Using <path/to/custom_model_after_vela.tflite>
+++ Converting custom_model_after_vela.tflite to\
+custom_model_after_vela.tflite.cc
+...
+```
+
+After compiling, your custom model will have now replaced the default one in the application.
+
+## Setting-up and running Ethos-U55 code sample
+
+### Setting up the Ethos-U55 Fast Model
+
+The FVP is available publicly from
+[Arm Ecosystem FVP downloads](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For Ethos-U55 evaluation, please download the MPS3 version of the Arm® Corstone™-300 model that contains Ethos-U55 and
+Cortex-M55. The model is currently only supported on Linux based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+./FVP_Corstone_SSE-300_Ethos-U55.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+Once the build step is completed, the application binary `ethos-u-inference_runner.axf` can be found in the `build/bin` folder.
+Assuming the FVP install location was set to `~/FVP_install_location`, the simulation can be started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 \
+    ./bin/mps3-sse-300/ethos-u-inference_runner.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error log entries containing
+information about the pre-built application version, TensorFlow Lite Micro library version used, data type as well as
+the input and output tensor sizes of the model compiled into the executable binary.
+
+### Running Inference Runner
+
+After the application has started, the inference begins immediately and the results are output on the telnet terminal.
+
+The following example illustrates application output:
+
+```log
+[INFO] Profile for Inference:
+       Active NPU cycles: 26976
+       Idle NPU cycles: 196
+```
+
+After running an inference on randomly generated data, the log output shows the profiling results for this
+inference:
+
+- Ethos-U55's PMU report:
+
+  - 26,976 active cycles: number of cycles that were used for computation
+
+  - 196 idle cycles: number of cycles for which the NPU was idle
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+    the CPU model is not cycle-approximate or cycle-accurate.
diff --git a/docs/use_cases/kws.md b/docs/use_cases/kws.md
new file mode 100644
index 0000000..316b501
--- /dev/null
+++ b/docs/use_cases/kws.md
@@ -0,0 +1,474 @@
+# Keyword Spotting Code Sample
+
+- [Introduction](#introduction)
+  - [Prerequisites](#prerequisites)
+- [Building the code sample application from sources](#building-the-code-sample-application-from-sources)
+  - [Build options](#build-options)
+  - [Build process](#build-process)
+  - [Add custom input](#add-custom-input)
+  - [Add custom model](#add-custom-model)
+- [Setting-up and running Ethos-U55 code sample](#setting-up-and-running-ethos-u55-code-sample)
+  - [Setting up the Ethos-U55 Fast Model](#setting-up-the-ethos-u55-fast-model)
+  - [Starting Fast Model simulation](#starting-fast-model-simulation)
+  - [Running Keyword Spotting](#running-keyword-spotting)
+- [Keyword Spotting processing information](#keyword-spotting-processing-information)
+  - [Preprocessing and feature extraction](#preprocessing-and-feature-extraction)
+  - [Postprocessing](#postprocessing)
+
+## Introduction
+
+This document describes the process of setting up and running the Arm® Ethos™-U55 Keyword Spotting
+example.
+
+The use case code can be found in the [source/use_case/kws](../../source/use_case/kws) directory.
+
+### Preprocessing and feature extraction
+
+The DS-CNN keyword spotting model that is supplied with the Code Samples expects audio data to be preprocessed in
+a specific way before performing an inference. This section aims to provide an overview of the feature extraction
+process used.
+
+First the audio data is normalized to the range (-1, 1).
+
+> **Note:** Mel-frequency cepstral coefficients (MFCCs) are a common feature extracted from audio data and can be used as
+>input for machine learning tasks like keyword spotting and speech recognition.
+>See source/application/main/include/Mfcc.hpp for implementation details.
+
+Next, a window of 640 audio samples is taken from the start of the audio clip. From these 640 samples we calculate 10
+MFCC features.
+
+The whole window is shifted to the right by 320 audio samples and 10 new MFCC features are calculated. This process of
+shifting and calculating is repeated until the end of the 16000 audio samples needed to perform an inference is reached.
+In total this will be 49 windows that each have 10 MFCC features calculated for them, giving an input tensor of shape
+49x10.
+
+These extracted features are quantized, and an inference is performed.
+
+![KWS preprocessing](../media/KWS_preprocessing.png)
+
+If the audio clip is longer than 16000 audio samples then the initial starting position is offset by 16000/2 = 8000
+audio samples. From this new starting point, MFCC features for the next 16000 audio samples are calculated and another
+inference is performed (i.e. do an inference for samples 8000-24000).
+
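+For illustration only, the framing arithmetic described above can be sketched in a few lines of NumPy. This is not the
+code used by the application (the MFCC implementation lives in `source/application/main/include/Mfcc.hpp`); `mfcc_fn` is
+a placeholder for any function that returns the 10 MFCC coefficients of one window.
+
+```python
+import numpy as np
+
+SAMPLE_RATE  = 16000  # audio samples consumed per inference
+FRAME_LEN    = 640    # window length in samples
+FRAME_STRIDE = 320    # window shift in samples
+NUM_MFCC     = 10     # MFCC features per window
+
+def extract_features(audio, mfcc_fn):
+    """Slide a 640-sample window with a 320-sample stride over one
+    16000-sample clip and stack the per-window MFCC vectors."""
+    audio = audio.astype(np.float32)
+    audio /= (np.abs(audio).max() or 1.0)          # normalize to (-1, 1)
+    frames = [mfcc_fn(audio[i:i + FRAME_LEN], NUM_MFCC)
+              for i in range(0, SAMPLE_RATE - FRAME_LEN + 1, FRAME_STRIDE)]
+    return np.stack(frames)                        # shape (49, 10)
+```
+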
+> **Note:** Parameters of the MFCC feature extraction such as window size, stride, number of features etc. all depend on
+>what was used during model training. These values are specific to each model and if you try a different keyword spotting
+>model that uses MFCC input then values are likely to need changing to match the new model.
+>In addition, MFCC feature extraction methods can vary slightly with different normalization methods or scaling etc. being used.
+
+### Postprocessing
+
+After an inference is complete the highest probability detected word is output to the console, provided its probability is
+larger than a threshold value (default 0.9).
+
+If multiple inferences are performed for an audio clip, then multiple results will be output.
+
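+As a rough sketch of this logic (not the application's C++ implementation), the selection step amounts to an argmax
+followed by a threshold check:
+
+```python
+import numpy as np
+
+THRESHOLD = 0.9  # default value of kws_MODEL_SCORE_THRESHOLD
+
+def pick_keyword(probs, labels):
+    """probs: softmax output of one KWS inference, one entry per label.
+    Returns (label, score) when the best score clears the threshold,
+    otherwise None so no keyword is reported for this inference."""
+    best = int(np.argmax(probs))
+    if probs[best] > THRESHOLD:
+        return labels[best], float(probs[best])
+    return None
+```
+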
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the code sample application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the keyword spotting use case adds:
+
+- `kws_MODEL_TFLITE_PATH` - Path to the NN model file in TFLite format. Model will be processed and included into the application axf file. The default value points to one of the delivered set of models. Note that the parameters `kws_LABELS_TXT_FILE`,`TARGET_PLATFORM` and `ETHOS_U55_ENABLED` should be aligned with the chosen model, i.e.:
+  - if `ETHOS_U55_ENABLED` is set to `On` or `1`, the NN model is assumed to be optimized. The model will naturally fall back to the Arm® Cortex®-M CPU if an unoptimized model is supplied.
+  - if `ETHOS_U55_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized model in this case will result in a runtime error.
+
+- `kws_FILE_PATH`: Path to the directory containing audio files, or a path to single WAV file, to be used in the application. The default value points
+    to the resources/kws/samples folder containing the delivered set of audio clips.
+
+- `kws_LABELS_TXT_FILE`: Path to the labels' text file. The file is used to map key word class index to the text
+    label. The default value points to the delivered labels.txt file inside the delivery package.
+
+- `kws_AUDIO_RATE`: Input data sampling rate. Each audio file from kws_FILE_PATH is preprocessed during the build to
+    match NN model input requirements. Default value is 16000.
+
+- `kws_AUDIO_MONO`: If set to ON the audio data will be converted to mono. Default is ON.
+
+- `kws_AUDIO_OFFSET`: Begin loading the audio data from this offset (in seconds). Default value is 0.
+
+- `kws_AUDIO_DURATION`: Length of the audio data to be used in the application in seconds. Default is 0 meaning the
+    whole audio file will be taken.
+
+- `kws_AUDIO_MIN_SAMPLES`: Minimum number of samples required by the network model. If the audio clip is shorter than
+    this number, it is padded with zeros. Default value is 16000.
+
+- `kws_MODEL_SCORE_THRESHOLD`: Threshold value [0.0, 1.0] that must be applied to the inference results for a
+    label to be deemed valid. Default is 0.9
+
+- `kws_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By default, it is set
+    to 1MiB and should be enough for most models.
+
+To build **ONLY** the keyword spotting example application, add `-DUSE_CASE_BUILD=kws` to the `cmake` command line specified in [Building](../documentation.md#Building).
+
+### Build process
+
+> **Note:** This section describes the process of configuring the build for `MPS3: SSE-300`. For a different target platform, see the [Building](../documentation.md#Building) section.
+
+In order to build **only** the keyword spotting example, create a build directory and
+navigate inside, for example:
+
+```commandline
+mkdir build_kws && cd build_kws
+```
+
+On Linux, execute the following command to build the Keyword Spotting application to run on the Ethos-U55 Fast Model when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=kws ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=kws ..
+```
+
+Toolchain option `CMAKE_TOOLCHAIN_FILE` points to the toolchain specific file to set the compiler and platform specific
+parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DUSE_CASE_BUILD=kws ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Modeldebugger for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 \
+    -DUSE_CASE_BUILD=kws ..
+```
+
+> **Note:** If building for different Ethos-U55 configurations, see [Configuring build for different Arm Ethos-U55 configurations](../sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations).
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver is not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` can be used to configure the location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DUSE_CASE_BUILD=kws ..
+```
+
+Also, the `CMSIS_SRC_PATH` parameter can be used to override the CMSIS sources that TensorFlow uses by default for compilation. For example, to use the CMSIS sources fetched by the ethos-u helper script, we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=../ethos-u/core_software/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=../ethos-u/core_software/core_driver \
+    -DCMSIS_SRC_PATH=../ethos-u/core_software/cmsis \
+    -DUSE_CASE_BUILD=kws ..
+```
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run the CMake command.
+
+If the CMake command succeeded, build the application as follows:
+
+```commandline
+make -j4
+```
+
+For Windows, use `mingw32-make`.
+
+Add `VERBOSE=1` to see compilation and link details.
+
+Results of the build will be placed under `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-kws.axf
+ ├── ethos-u-kws.htm
+ ├── ethos-u-kws.map
+ ├── images-kws.txt
+ └── sectors
+      └── kws
+           ├── dram.bin
+           └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-kws.axf`: The built application binary for the Keyword Spotting use case.
+
+- `ethos-u-kws.map`: Information from building the application (e.g. libraries used, what was optimized, location of
+    objects)
+
+- `ethos-u-kws.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `images-kws.txt`: Tells the FPGA which memory regions to use for loading the binaries in the `sectors/` folder.
+
+### Add custom input
+
+The application performs inference on audio data found in the folder, or an individual file, set by the CMake parameter `kws_FILE_PATH`.
+
+To run the application with your own audio clips first create a folder to hold them and then copy the custom audio files
+into this folder, for example:
+
+```commandline
+mkdir /tmp/custom_wavs
+
+cp my_clip.wav /tmp/custom_wavs/
+```
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+Next set `kws_FILE_PATH` to the location of this folder when building:
+
+```commandline
+cmake \
+    -Dkws_FILE_PATH=/tmp/custom_wavs/ \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DUSE_CASE_BUILD=kws \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+The audio clips found in the `kws_FILE_PATH` folder will be picked up and automatically converted to C++ files during the
+CMake configuration stage and then compiled into the application during the build phase for performing inference with.
+
+The log from the configuration stage should tell you what audio clip directory path has been used:
+
+```log
+-- User option kws_FILE_PATH is set to /tmp/custom_wavs
+-- Generating audio files from /tmp/custom_wavs
+++ Converting my_clip.wav to my_clip.cc
+++ Generating build/generated/kws/include/AudioClips.hpp
+++ Generating build/generated/kws/src/AudioClips.cc
+-- Defined build user options:
+-- kws_FILE_PATH=/tmp/custom_wavs
+```
+
+After compiling, your custom inputs will have now replaced the default ones in the application.
+
+> **Note:** The CMake parameter `kws_AUDIO_MIN_SAMPLES` determines the minimum number of input samples. When building the application,
+>if an audio clip is shorter than `kws_AUDIO_MIN_SAMPLES` it will be padded with zeros to meet this minimum.
+
+### Add custom model
+
+The application performs inference using the model pointed to by the CMake parameter `kws_MODEL_TFLITE_PATH`.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom model has been run through the Vela compiler successfully before continuing. See [Optimize model with Vela compiler](../sections/building.md#Optimize-custom-model-with-Vela-compiler).
+
+To run the application with a custom model you will need to provide a `labels_<model_name>.txt` file of labels
+associated with the model. Each line of the file should correspond to one of the outputs in your model. See the provided
+`ds_cnn_labels.txt` file for an example.
+
+Then, you must set `kws_MODEL_TFLITE_PATH` to the location of the Vela processed model file and `kws_LABELS_TXT_FILE`
+to the location of the associated labels file.
+
+An example:
+
+```commandline
+cmake \
+    -Dkws_MODEL_TFLITE_PATH=<path/to/custom_model_after_vela.tflite> \
+    -Dkws_LABELS_TXT_FILE=<path/to/labels_custom_model.txt> \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DUSE_CASE_BUILD=kws \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The `.tflite` model file pointed to by `kws_MODEL_TFLITE_PATH` and labels text file pointed to by `kws_LABELS_TXT_FILE` will
+be converted to C++ files during the CMake configuration stage and then compiled into the application for performing
+inference with.
+
+The log from the configuration stage should tell you what model path and labels file have been used:
+
+```log
+-- User option kws_MODEL_TFLITE_PATH is set to <path/to/custom_model_after_vela.tflite>
+...
+-- User option kws_LABELS_TXT_FILE is set to <path/to/labels_custom_model.txt>
+...
+-- Using <path/to/custom_model_after_vela.tflite>
+++ Converting custom_model_after_vela.tflite to\
+custom_model_after_vela.tflite.cc
+-- Generating labels file from <path/to/labels_custom_model.txt>
+-- writing to <path/to/build/generated/src/Labels.cc>
+...
+```
+
+After compiling, your custom model will have now replaced the default one in the application.
+
+## Setting-up and running Ethos-U55 code sample
+
+### Setting up the Ethos-U55 Fast Model
+
+The FVP is available publicly from [Arm Ecosystem FVP downloads](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For Ethos-U55 evaluation, please download the MPS3 version of the Arm® Corstone™-300 model that contains Ethos-U55 and
+Cortex-M55. The model is currently only supported on Linux based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+./FVP_Corstone_SSE-300_Ethos-U55.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+Once the build step is completed, the application binary `ethos-u-kws.axf` can be found in the `build/bin` folder.
+Assuming the FVP install location was set to `~/FVP_install_location`, the simulation can be started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 \
+    ./bin/mps3-sse-300/ethos-u-kws.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error log entries containing
+information about the pre-built application version, TensorFlow Lite Micro library version used, data type as well as
+the input and output tensor sizes of the model compiled into the executable binary.
+
+After the application has started, if `kws_FILE_PATH` points to a single file (or a folder containing a single input file)
+the inference starts immediately. If multiple inputs are provided, a menu is output and the application waits for user input from the telnet terminal:
+
+```log
+User input required
+Enter option number from:
+
+1. Classify next audio clip
+2. Classify audio clip at chosen index
+3. Run classification on all audio clips
+4. Show NN model info
+5. List audio clips
+
+Choice:
+
+```
+
+1. “Classify next audio clip” menu option will run inference on the next voice clip from the collection of compiled
+    audio clips.
+
+    > **Note:** If the clip is over a certain length, the application will invoke multiple inference runs to cover the entire file.
+
+2. “Classify audio clip at chosen index” menu option will run inference on the chosen audio clip.
+
+    > **Note:** Please make sure to select an audio clip index within the range of the audio clips supplied during the application build.
+    > By default, the pre-built application has 4 files, with indexes from 0 to 3.
+
+3. “Run classification on all audio clips” menu option triggers sequential inference executions on all built-in voice
+    samples.
+
+4. “Show NN model info” menu option prints information about model data type, input and output tensor sizes:
+
+    ```log
+    [INFO] uTFL version: 2.5.0
+    [INFO] Model info:
+    [INFO] Model INPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 490 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:   1
+    [INFO] 		2:  49
+    [INFO] 		3:  10
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 1.107164
+    [INFO] ZeroPoint[0] = 95
+    [INFO] Model OUTPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 12 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:  12
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.003906
+    [INFO] ZeroPoint[0] = -128
+    [INFO] Activation buffer (a.k.a tensor arena) size used: 72848
+    [INFO] Number of operators: 1
+    [INFO] 	Operator 0: ethos-u
+    [INFO] Use of Arm uNPU is enabled
+    ```
+
+5. “List audio clips” menu option prints a list of audio clip indexes paired with the original filenames embedded in the
+    application:
+
+    ```log
+    [INFO] List of Files:
+    [INFO] 0 => down.wav
+    [INFO] 1 => rightleftup.wav
+    [INFO] 2 => yes.wav
+    [INFO] 3 => yesnogostop.wav
+    ```
+
+### Running Keyword Spotting
+
+Selecting the first option will run inference on the first file.
+
+The following example illustrates application output for classification:
+
+```log
+[INFO] Running inference on audio clip 0 => down.wav
+[INFO] Inference 1/1
+[INFO] Profile for Inference:
+	Active NPU cycles: 680400
+	Idle NPU cycles:   766
+
+[INFO] For timestamp: 0.000000 (inference #: 0); threshold: 0.900000
+[INFO] 		label @ 0: down, score: 0.996094
+```
+
+Each inference should take less than 30 seconds on most systems running Fast Model.
+The profiling section of the log shows that for this inference:
+
+- Ethos-U55's PMU report:
+
+  - 680,400 active cycles: number of cycles that were used for computation
+
+  - 766 idle cycles: number of cycles for which the NPU was idle
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+    the CPU model is not cycle-approximate or cycle-accurate.
+
+The application prints the highest confidence score and the associated label from the `ds_cnn_labels.txt` file.
\ No newline at end of file
diff --git a/docs/use_cases/kws_asr.md b/docs/use_cases/kws_asr.md
new file mode 100644
index 0000000..e79b887
--- /dev/null
+++ b/docs/use_cases/kws_asr.md
@@ -0,0 +1,589 @@
+# Keyword Spotting and Automatic Speech Recognition Code Sample
+
+- [Introduction](#introduction)
+  - [Prerequisites](#prerequisites)
+- [Building the code sample application from sources](#building-the-code-sample-application-from-sources)
+  - [Build options](#build-options)
+  - [Build process](#build-process)
+  - [Add custom input](#add-custom-input)
+  - [Add custom model](#add-custom-model)
+- [Setting-up and running Ethos-U55 Code Samples](#setting-up-and-running-ethos-u55-code-samples)
+  - [Setting up the Ethos-U55 Fast Model](#setting-up-the-ethos-u55-fast-model)
+  - [Starting Fast Model simulation](#starting-fast-model-simulation)
+  - [Running Keyword Spotting and Automatic Speech Recognition](#running-keyword-spotting-and-automatic-speech-recognition)
+- [Keyword Spotting and Automatic Speech Recognition processing information](#keyword-spotting-and-automatic-speech-recognition-processing-information)
+  - [Preprocessing and feature extraction](#preprocessing-and-feature-extraction)
+    - [Keyword Spotting Preprocessing](#keyword-spotting-preprocessing)
+    - [Automatic Speech Recognition Preprocessing](#automatic-speech-recognition-preprocessing)
+  - [Postprocessing](#postprocessing)
+
+## Introduction
+
+This document describes the process of setting up and running an example of sequential execution of the Keyword Spotting
+and Automatic Speech Recognition models on Cortex-M CPU and Ethos-U NPU.
+
+The Keyword Spotting and Automatic Speech Recognition example demonstrates how to run multiple models sequentially. A
+Keyword Spotting model is first run on the CPU and if a set keyword is detected then an Automatic Speech Recognition
+model is run on Ethos-U55 on the remaining audio.
+The tensor arena memory region is reused between models to optimize the application memory footprint.
+
+"Yes" key word is used to trigger full command recognition following the key word.
+Use case code could be found in [source/use_case/kws_asr](../../source/use_case/kws_asr]) directory.
+
+### Preprocessing and feature extraction
+
+In this use case there are two different models being used, each with different preprocessing requirements. As such, each
+preprocessing stage is detailed below. Note that Automatic Speech Recognition only occurs if a keyword is detected in
+the audio clip.
+
+By default the KWS model is run purely on CPU and not on the Ethos-U55.
+
+#### Keyword Spotting Preprocessing
+
+The DS-CNN keyword spotting model that is used with the Code Samples expects audio data to be preprocessed in
+a specific way before performing an inference. This section aims to provide an overview of the feature extraction
+process used.
+
+First the audio data is normalized to the range (-1, 1).
+
+> **Note:** Mel-frequency cepstral coefficients (MFCCs) are a common feature extracted from audio data and can be used as input for machine learning tasks like keyword spotting and speech recognition. See source/application/main/include/Mfcc.hpp for implementation details.
+
+Next, a window of 640 audio samples is taken from the start of the audio clip. From these 640 samples we calculate 10
+MFCC features.
+
+The whole window is shifted to the right by 320 audio samples and 10 new MFCC features are calculated. This process of
+shifting and calculating is repeated until the end of the 16000 audio samples needed to perform an inference is reached.
+In total this will be 49 windows that each have 10 MFCC features calculated for them, giving an input tensor of shape
+49x10.
+
+These extracted features are quantized, and an inference is performed.
+
+If the audio clip is longer than 16000 audio samples then the initial starting position is offset by 16000/2 = 8000
+audio samples. From this new starting point, MFCC features for the next 16000 audio samples are calculated and another
+inference is performed (i.e. do an inference for samples 8000-24000).
+
+> **Note:** Parameters of the MFCC feature extraction such as window size, stride, number of features etc. all depend on what was used during model training. These values are specific to each model and if you try a different keyword spotting model that uses MFCC input then values are likely to need changing to match the new model.
+
+In addition, MFCC feature extraction methods can vary slightly with different normalization methods or scaling etc. being used.
+
+#### Automatic Speech Recognition Preprocessing
+
+The wav2letter automatic speech recognition model that is used with the Code Samples expects audio data to be
+preprocessed in a specific way before performing an inference. This section aims to provide an overview of the feature
+extraction process used.
+
+First the audio data is normalized to the range (-1, 1).
+
+> **Note:** Mel-frequency cepstral coefficients (MFCCs) are a common feature extracted from audio data and can be used as input for machine learning tasks like keyword spotting and speech recognition. See source/application/main/include/Mfcc.hpp for implementation details.
+
+Next, a window of 512 audio samples is taken from the start of the audio clip. From these 512 samples we calculate 13
+MFCC features.
+
+The whole window is shifted to the right by 160 audio samples and 13 new MFCC features are calculated. This process of
+shifting and calculating is repeated until enough audio samples to perform an inference have been processed. In total
+this will be 296 windows that each have 13 MFCC features calculated for them.
+
+After extracting MFCC features the first and second order derivatives of these features with respect to time are
+calculated. These derivative features are then standardized and concatenated with the MFCC features (which also get
+standardized). At this point the input tensor will have a shape of 296x39.
+
+These extracted features are quantized, and an inference is performed.
+
+For longer audio clips where multiple inferences need to be performed, the initial starting position is offset by
+(100\*160) = 16000 audio samples. From this new starting point, MFCC and derivative features are calculated as before
+until there is enough to perform another inference. Padding can be used if there are not enough audio samples for at
+least 1 inference. This step is repeated until the whole audio clip has been processed. If there are not enough audio
+samples for a final complete inference the MFCC features will be padded by repeating the last calculated feature until
+an inference can be performed.
+
+> **Note:** Parameters of the MFCC feature extraction such as window size, stride, number of features etc. all depend on what was used during model training. These values are specific to each model. If you switch to a different ASR model than the one supplied, then the feature extraction process could be completely different to the one currently implemented.
+
+The amount of audio samples we offset by for long audio clips is specific to the included wav2letter model.
+
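+A rough NumPy sketch of the derivative and standardization step is shown below. It assumes the (296, 13) MFCC matrix has
+already been computed; the exact derivative and normalization details are specific to the supplied model, so treat this
+as an outline rather than the application's implementation.
+
+```python
+import numpy as np
+
+def asr_input_tensor(mfccs):
+    """mfccs: (296, 13) MFCC matrix for one inference window.
+    Returns the (296, 39) input tensor: standardized MFCCs concatenated
+    with their standardized first and second time derivatives."""
+    d1 = np.gradient(mfccs, axis=0)   # first derivative w.r.t. time
+    d2 = np.gradient(d1, axis=0)      # second derivative w.r.t. time
+
+    def standardize(x):
+        return (x - x.mean(axis=0)) / (x.std(axis=0) + 1e-8)
+
+    return np.concatenate([standardize(mfccs),
+                           standardize(d1),
+                           standardize(d2)], axis=1)
+```
+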
+### Postprocessing
+
+If a keyword is detected then the ASR process is run and the raw output of that inference needs to be postprocessed to
+get a usable result.
+
+The raw output from the model is a tensor of shape 148x29 where each row is a probability distribution over the possible
+29 characters that can appear at each of the 148 time steps.
+
+This wav2letter model is trained using context windows, which means that only certain parts of the output are usable,
+depending on the part of the audio clip that is currently being processed.
+
+If this is the first inference and multiple inferences are required, then ignore the final 49 rows of the output.
+Similarly, if this is the final inference from multiple inferences then ignore the first 49 rows of the output. Finally,
+if this inference is not the last or first inference then ignore the first and last 49 rows of the model output.
+
+> **Note:** If the audio clip is small enough then the whole of the model output is usable and there is no need to throw away any of the output before continuing.
+
+Once any rows have been removed the final processing can be done. To process the output, first the letter with the
+highest probability at each time step is found. Next, any letters that are repeated multiple times in a row are removed
+(e.g. [t, t, t, o, p, p] becomes [t, o, p]). Finally, the 29th letter, the blank token, is removed from the output.
+
+For the final output, the results from all inferences are combined before decoding. What you are left with is then
+displayed to the console.
+
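+The row trimming and greedy decode described above can be sketched as follows. The blank token index and the character
+ordering in `alphabet` are assumptions for illustration; the application performs the equivalent steps in C++.
+
+```python
+import numpy as np
+
+CONTEXT_ROWS = 49   # rows unusable at a context-window boundary
+BLANK_INDEX = 28    # assumed index of the blank token (29th character)
+
+def greedy_decode(output, alphabet, first, last):
+    """output: (148, 29) probability matrix from one ASR inference.
+    Trims the unusable context rows, takes the argmax per time step,
+    collapses repeated characters and drops the blank token."""
+    if not first:
+        output = output[CONTEXT_ROWS:]     # ignore the first 49 rows
+    if not last:
+        output = output[:-CONTEXT_ROWS]    # ignore the final 49 rows
+
+    decoded, prev = [], None
+    for idx in np.argmax(output, axis=1):
+        if idx != prev and idx != BLANK_INDEX:
+            decoded.append(alphabet[idx])
+        prev = idx
+    return "".join(decoded)
+```
+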
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the code sample application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the Keyword Spotting and Automatic Speech
+Recognition use case adds:
+
+- `kws_asr_MODEL_TFLITE_PATH_ASR` and `kws_asr_MODEL_TFLITE_PATH_KWS`: Path to the NN model files in TFLite format.
+    Models will be processed and included into the application axf file. The default value points to one of the delivered set of models.
+    Note that the parameters `kws_asr_LABELS_TXT_FILE_KWS`, `kws_asr_LABELS_TXT_FILE_ASR`,`TARGET_PLATFORM` and `ETHOS_U55_ENABLED`
+    should be aligned with the chosen model, i.e:
+  - if `ETHOS_U55_ENABLED` is set to `On` or `1`, the NN model is assumed to be optimized. The model will naturally fall back to the Arm® Cortex®-M CPU if an unoptimized model is supplied.
+  - if `ETHOS_U55_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized model in this case will result in a runtime error.
+
+- `kws_asr_FILE_PATH`: Path to the directory containing audio files, or a path to single WAV file, to be used in the application. The default value
+    points to the resources/kws_asr/samples folder containing the delivered set of audio clips.
+
+- `kws_asr_LABELS_TXT_FILE_KWS` and `kws_asr_LABELS_TXT_FILE_ASR`: Path respectively to keyword spotting labels' and the automatic speech
+    recognition labels' text files. The file is used to map
+    letter class index to the text label. The default value points to the delivered labels.txt file inside the delivery
+    package.
+
+- `kws_asr_AUDIO_RATE`: Input data sampling rate. Each audio file from kws_asr_FILE_PATH is preprocessed during the
+    build to match NN model input requirements. Default value is 16000.
+
+- `kws_asr_AUDIO_MONO`: If set to ON the audio data will be converted to mono. Default is ON.
+
+- `kws_asr_AUDIO_OFFSET`: Begin loading the audio data from this offset (in seconds). Default value is 0.
+
+- `kws_asr_AUDIO_DURATION`: Length of the audio data to be used in the application in seconds. Default is 0 meaning
+    the whole audio file will be taken.
+
+- `kws_asr_AUDIO_MIN_SAMPLES`: Minimum number of samples required by the network model. If the audio clip is shorter
+    than this number, it is padded with zeros. Default value is 16000.
+
+- `kws_asr_MODEL_SCORE_THRESHOLD_KWS`: Threshold value that must be applied to the keyword spotting inference
+    results for a label to be deemed valid. Default is 0.9.
+
+- `kws_asr_MODEL_SCORE_THRESHOLD_ASR`: Threshold value that must be applied to the automatic speech recognition
+    inference results for a label to be deemed valid. Default is 0.5.
+
+- `kws_asr_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By default, it is
+    set to 2MiB and should be enough for most models.
+
+To build **ONLY** the Keyword Spotting and Automatic Speech Recognition example application, add `-DUSE_CASE_BUILD=kws_asr`
+to the `cmake` command line specified in [Building](../documentation.md#Building).
+
+### Build process
+
+> **Note:** This section describes the process of configuring the build for `MPS3: SSE-300`. For a different target platform, see [Building](../documentation.md#Building).
+
+Create a build directory and navigate inside:
+
+```commandline
+mkdir build_kws_asr && cd build_kws_asr
+```
+
+On Linux, execute the following command to build the application to run on the Ethos-U55 Fast Model when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+Toolchain option `CMAKE_TOOLCHAIN_FILE` points to the toolchain specific file to set the compiler and platform specific
+parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Modeldebugger for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+> **Note:** If building for different Ethos-U55 configurations, see [Configuring build for different Arm Ethos-U55 configurations](../sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations).
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver is not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` can be used to configure the location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+Also, the `CMSIS_SRC_PATH` parameter can be used to override the CMSIS sources that TensorFlow uses by default for compilation. For example, to use the CMSIS sources fetched by the ethos-u helper script, we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=../ethos-u/core_software/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=../ethos-u/core_software/core_driver \
+    -DCMSIS_SRC_PATH=../ethos-u/core_software/cmsis \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run the CMake command.
+
+If the CMake command succeeded, build the application as follows:
+
+```commandline
+make -j4
+```
+
+For Windows, use `mingw32-make`.
+
+Add `VERBOSE=1` to see compilation and link details.
+
+Results of the build will be placed under `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-kws_asr.axf
+ ├── ethos-u-kws_asr.htm
+ ├── ethos-u-kws_asr.map
+ ├── images-kws_asr.txt
+ └── sectors
+      └── kws_asr
+           ├── dram.bin
+           └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-kws_asr.axf`: The built application binary for the Keyword Spotting and Automatic Speech Recognition use
+    case.
+
+- `ethos-u-kws_asr.map`: Information from building the application (e.g. libraries used, what was optimized, location
+    of objects)
+
+- `ethos-u-kws_asr.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `images-kws_asr.txt`: Tells the FPGA which memory regions to use for loading the binaries in the `sectors/` folder.
+
+### Add custom input
+
+The application performs inference on data found in the folder set by the CMake parameter `kws_asr_FILE_PATH`.
+
+To run the application with your own audio clips first create a folder to hold them and then copy the custom files into
+this folder:
+
+```commandline
+mkdir /tmp/custom_files
+
+cp custom_audio1.wav /tmp/custom_files/
+```
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+Next set `kws_asr_FILE_PATH` to the location of this folder when building:
+
+```commandline
+cmake \
+    -Dkws_asr_FILE_PATH=/tmp/custom_files/ \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+The files found in the `kws_asr_FILE_PATH` folder will be picked up and automatically converted to C++ files during the
+CMake configuration stage and then compiled into the application during the build phase for performing inference with.
+
+The log from the configuration stage should tell you what directory path has been used:
+
+```log
+-- User option kws_asr_FILE_PATH is set to /tmp/custom_files
+```
+
+After compiling, your custom inputs will have now replaced the default ones in the application.
+
+### Add custom model
+
+The application performs KWS inference using the model pointed to by the CMake parameter `kws_asr_MODEL_TFLITE_PATH_KWS` and
+ASR inference using the model pointed to by the CMake parameter `kws_asr_MODEL_TFLITE_PATH_ASR`.
+
+This section assumes you wish to change the existing ASR model to a custom one. If instead you wish to change the KWS
+model then the instructions will be the same except ASR will change to KWS.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom model has been run through the Vela compiler successfully before continuing. See [Optimize model with Vela compiler](../sections/building.md#Optimize-custom-model-with-Vela-compiler).
+
+To run the application with a custom model you will need to provide a `labels_<model_name>.txt` file of labels
+associated with the model. Each line of the file should correspond to one of the outputs in your model. See the provided
+`labels_wav2letter.txt` file for an example.
+
+Then, you must set `kws_asr_MODEL_TFLITE_PATH_ASR` to the location of the Vela processed model file and
+`kws_asr_LABELS_TXT_FILE_ASR` to the location of the associated labels file.
+
+An example:
+
+```commandline
+cmake \
+    -Dkws_asr_MODEL_TFLITE_PATH_ASR=<path/to/custom_asr_model_after_vela.tflite> \
+    -Dkws_asr_LABELS_TXT_FILE_ASR=<path/to/labels_custom_model.txt> \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The `.tflite` model files pointed to by `kws_asr_MODEL_TFLITE_PATH_KWS` and `kws_asr_MODEL_TFLITE_PATH_ASR`, labels text files pointed to by `kws_asr_LABELS_TXT_FILE_KWS` and `kws_asr_LABELS_TXT_FILE_ASR`
+will be converted to C++ files during the CMake configuration stage and then compiled into the application for
+performing inference with.
+
+The log from the configuration stage should tell you what model path and labels file have been used:
+
+```log
+-- User option TARGET_PLATFORM is set to mps3
+-- User option kws_asr_MODEL_TFLITE_PATH_ASR is set to <path/to/custom_asr_model_after_vela.tflite>
+...
+-- User option kws_asr_LABELS_TXT_FILE_ASR is set to <path/to/labels_custom_model.txt>
+...
+-- Using <path/to/custom_asr_model_after_vela.tflite>
+++ Converting custom_asr_model_after_vela.tflite to\
+custom_asr_model_after_vela.tflite.cc
+-- Generating labels file from <path/to/labels_custom_model.txt>
+-- writing to Labels_wav2letter
+...
+```
+
+After compiling, your custom model will have now replaced the default one in the application.
+
+## Setting-up and running Ethos-U55 Code Samples
+
+### Setting up the Ethos-U55 Fast Model
+
+The FVP is available publicly from [Arm Ecosystem FVP downloads](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For Ethos-U55 evaluation, please download the MPS3 version of the Arm® Corstone™-300 model that contains Ethos-U55 and
+Cortex-M55. The model is currently only supported on Linux based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+./FVP_Corstone_SSE-300_Ethos-U55.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+Once the build step is completed, the application binary `ethos-u-kws_asr.axf` can be found in the `build/bin` folder.
+Assuming the FVP install location was set to `~/FVP_install_location`, the simulation can be started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 \
+    ./bin/mps3-sse-300/ethos-u-kws_asr.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error log entries containing
+information about the pre-built application version, TensorFlow Lite Micro library version used, data type as well as
+the input and output tensor sizes of the model compiled into the executable binary.
+
+After the application has started, if `kws_asr_FILE_PATH` points to a single file (or a folder containing a single input file)
+the inference starts immediately. If multiple inputs are provided, a menu is output and the application waits for user input from the telnet terminal:
+
+```log
+User input required
+Enter option number from:
+
+1. Classify next audio clip
+2. Classify audio clip at chosen index
+3. Run classification on all audio clips
+4. Show NN model info
+5. List audio clips
+
+Choice:
+
+```
+
+1. “Classify next audio clip” menu option will run a single inference on the next included file.
+
+2. “Classify audio clip at chosen index” menu option will run inference on the chosen audio clip.
+
+    > **Note:** Please make sure to select an audio clip index within the range of the audio clips supplied during the application build.
+
+3. “Run classification on all audio clips” menu option triggers sequential inference executions on all built-in files.
+
+4. “Show NN model info” menu option prints information about model data type, input and output tensor sizes:
+
+    ```log
+    [INFO] uTFL version: 2.5.0
+    [INFO] Model INPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 490 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:   1
+    [INFO] 		2:  49
+    [INFO] 		3:  10
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 1.107164
+    [INFO] ZeroPoint[0] = 95
+    [INFO] Model OUTPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 12 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:  12
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.003906
+    [INFO] ZeroPoint[0] = -128
+    [INFO] Activation buffer (a.k.a tensor arena) size used: 123616
+    [INFO] Number of operators: 16
+    [INFO] 	Operator 0: RESHAPE
+    [INFO] 	Operator 1: CONV_2D
+    [INFO] 	Operator 2: DEPTHWISE_CONV_2D
+    [INFO] 	Operator 3: CONV_2D
+    [INFO] 	Operator 4: DEPTHWISE_CONV_2D
+    [INFO] 	Operator 5: CONV_2D
+    [INFO] 	Operator 6: DEPTHWISE_CONV_2D
+    [INFO] 	Operator 7: CONV_2D
+    [INFO] 	Operator 8: DEPTHWISE_CONV_2D
+    [INFO] 	Operator 9: CONV_2D
+    [INFO] 	Operator 10: DEPTHWISE_CONV_2D
+    [INFO] 	Operator 11: CONV_2D
+    [INFO] 	Operator 12: AVERAGE_POOL_2D
+    [INFO] 	Operator 13: RESHAPE
+    [INFO] 	Operator 14: FULLY_CONNECTED
+    [INFO] 	Operator 15: SOFTMAX
+    [INFO] Model INPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 11544 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1: 296
+    [INFO] 		2:  39
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.110316
+    [INFO] ZeroPoint[0] = -11
+    [INFO] Model OUTPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 4292 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:   1
+    [INFO] 		2: 148
+    [INFO] 		3:  29
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.003906
+    [INFO] ZeroPoint[0] = -128
+    [INFO] Activation buffer (a.k.a tensor arena) size used: 809808
+    [INFO] Number of operators: 1
+    [INFO] 	Operator 0: ethos-u
+    ```
+
+5. “List audio clips” menu option prints a list of audio clip indexes paired with the original filenames embedded in the application:
+
+    ```log
+    [INFO] List of Files:
+    [INFO] 0 => yesnogostop.wav
+    ```
+
+### Running Keyword Spotting and Automatic Speech Recognition
+
+Please select the first menu option to execute Keyword Spotting and Automatic Speech Recognition.
+
+The following example illustrates application output:
+
+```log
+[INFO] KWS audio data window size 16000
+[INFO] Running KWS inference on audio clip 0 => yesnogostop.wav
+[INFO] Inference 1/7
+[INFO] Profile for Inference:
+	Active NPU cycles: 0
+	Idle NPU cycles:   6
+
+[INFO] For timestamp: 0.000000 (inference #: 0); threshold: 0.900000
+[INFO] 		label @ 0: yes, score: 0.996094
+[INFO] Keyword spotted
+[INFO] Inference 1/2
+[INFO] Profile for Inference:
+	Active NPU cycles: 28924742
+	Idle NPU cycles:   424
+
+[INFO] Inference 2/2
+[INFO] Profile for Inference:
+	Active NPU cycles: 28924740
+	Idle NPU cycles:   426
+
+[INFO] Result for inf 0: no gow
+[INFO] Result for inf 1:  stoppe
+[INFO] Final result: no gow stoppe
+```
+
+It could take several minutes to complete one inference run (average time is 2-3 minutes).
+
+Using the input “yesnogostop.wav”, the log shows inference results for the KWS operation first, detecting the
+trigger word “yes“ with the stated probability score (in this case 0.99). After this, the ASR inference is run,
+printing the words recognized from the input sample.
+
+The profiling section of the log shows that for the ASR inference:
+
+- Ethos-U55's PMU report:
+
+  - 28,924,740 active cycles: number of cycles that were used for computation
+
+  - 426 idle cycles: number of cycles for which the NPU was idle
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+the CPU model is not cycle-approximate or cycle-accurate.
+
+    Note that in this example the KWS inference does not use the Ethos-U55 and is run purely on the CPU, which is why 0
+    active NPU cycles are shown.
diff --git a/model_conditioning_examples/Readme.md b/model_conditioning_examples/Readme.md
new file mode 100644
index 0000000..ede2c24
--- /dev/null
+++ b/model_conditioning_examples/Readme.md
@@ -0,0 +1,173 @@
+# Model conditioning examples
+
+- [Introduction](#introduction)
+  - [How to run](#how-to-run)
+- [Quantization](#quantization)
+  - [Post-training quantization](#post-training-quantization)
+  - [Quantization aware training](#quantization-aware-training)
+- [Weight pruning](#weight-pruning)
+- [Weight clustering](#weight-clustering)
+- [References](#references)
+
+## Introduction
+
+This folder contains short example scripts that demonstrate some methods available in TensorFlow to condition your model
+in preparation for deployment on Arm Ethos NPU.
+
+These scripts will cover three main topics:
+
+- Quantization
+- Weight clustering
+- Weight pruning
+
+The objective of these scripts is not to be a single source of knowledge on everything related to model conditioning.
+Instead, the aim is to provide the reader with a quick starting point that demonstrates some commonly used tools which
+enable models to run on Arm Ethos NPU and also optimize them for maximum performance from the Arm Ethos NPU.
+
+Links to more in-depth guides available on the TensorFlow website are provided in the [references](#references) section
+in this Readme.
+
+### How to run
+
+From the `model_conditioning_examples` folder run the following command:
+
+```commandline
+./setup.sh
+```
+
+This will create a Python virtual environment and install the required versions of TensorFlow and the TensorFlow Model
+Optimization Toolkit needed to run the example scripts.
+
+If the virtual environment has not been activated you can do so by running:
+
+```commandline
+source ./env/bin/activate
+```
+
+You can then run the examples from the command line. For example to run the post-training quantization example:
+
+```commandline
+python ./post_training_quantization.py
+```
+
+The produced TensorFlow Lite model files will be saved in a `conditioned_models` sub-folder.
+
+## Quantization
+
+Most machine learning models are trained using 32-bit floating point precision. However, Arm Ethos NPU performs
+calculations in 8-bit integer precision. As a result, any model you wish to deploy on Arm Ethos NPU must
+first be fully quantized to 8 bits.
+
+TensorFlow provides two methods of quantization and the scripts in this folder will demonstrate these:
+
+- [Post-training quantization](./post_training_quantization.py)
+- [Quantization aware training](./quantization_aware_training.py)
+
+Both of these techniques will not only quantize the weights of the model but also the variable tensors, such as model
+input and output, and the activations of each intermediate layer.
+
+For details on the quantization specification used by TensorFlow please see
+[here](https://www.tensorflow.org/lite/performance/quantization_spec).
+
+In both methods, scale and zero point values are chosen to allow the floating point weights to be maximally
+represented in this reduced precision. Quantization is performed per-axis, meaning a different scale and zero point
+are used for each channel of a layer's weights.
+
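+As an illustration of the affine mapping itself (see the TensorFlow quantization specification linked above for the
+authoritative definition), a scale and zero point can be derived from a tensor's observed minimum and maximum values
+roughly as follows; note that the specification restricts per-axis weight quantization to a symmetric variant where the
+zero point is 0.
+
+```python
+def quant_params(w_min, w_max, num_bits=8):
+    """Map the float range [w_min, w_max] onto signed 8-bit integers so
+    that real_value is approximately scale * (int8_value - zero_point)."""
+    qmin, qmax = -(2 ** (num_bits - 1)), 2 ** (num_bits - 1) - 1
+    w_min, w_max = min(w_min, 0.0), max(w_max, 0.0)   # range must contain 0
+    scale = (w_max - w_min) / (qmax - qmin) or 1.0    # guard against a zero range
+    zero_point = int(round(qmin - w_min / scale))
+    return scale, zero_point
+
+# Example: quant_params(-0.5, 1.5) -> scale ~0.0078, zero_point ~-64
+```
+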
+### Post-training quantization
+
+The first of the quantization methods that will be covered is called post-training quantization. As the name suggests
+this form of quantization takes place after training of your model is complete. It is also the simpler of the methods
+we will show to quantize a model.
+
+In post-training quantization, first the weights of the model are quantized to 8-bit integer values. After this we
+quantize the variable tensors, such as layer activations. To do this we need to calculate the potential range of values
+that all these tensors can take.
+
+Calculating these ranges requires a small dataset that is representative of what you expect your model to see when
+it is deployed. Model inference is then performed using this representative dataset and the resulting minimum and
+maximum values for variable tensors are calculated.
+
+Only a small number of samples need to be used in this calibration dataset (around 100-500 should be enough). These
+samples can be taken from the training or validation sets.
+
+Quantizing a model can result in an accuracy drop, depending on the model. However, for many use cases the accuracy
+drop when using post-training quantization is usually minimal. After post-training quantization is complete you will
+have a fully quantized TensorFlow Lite model.
+
+If you are targeting an Arm Ethos-U55 NPU then the output TensorFlow Lite file will also need to be passed through the
+Vela compiler for further optimizations before it can be used.
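+
+The snippet below condenses the converter settings used in
+[post_training_quantization.py](./post_training_quantization.py); `trained_keras_model` and `calibration_samples` are
+placeholders for your own model and representative data:
+
+```python
+# Condensed sketch of post-training quantization; see post_training_quantization.py for the full example.
+import numpy as np
+import tensorflow as tf
+
+def representative_dataset():
+    # Yield around 100 samples that are representative of the data the deployed model will see.
+    for sample in calibration_samples[:100]:
+        yield [np.expand_dims(sample, axis=0).astype(np.float32)]
+
+converter = tf.lite.TFLiteConverter.from_keras_model(trained_keras_model)
+converter.optimizations = [tf.lite.Optimize.DEFAULT]
+converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]  # error out if any op cannot be quantized
+converter.representative_dataset = representative_dataset
+tflite_model = converter.convert()
+```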
+
+### Quantization aware training
+
+Depending on the model, the accuracy drop caused by post-training quantization can be too large to be acceptable. This
+is where quantization aware training can help. Quantization aware training simulates the quantization of weights and
+activations during the inference stage of training using fake quantization nodes.
+
+By simulating quantization during training, the model weights are adjusted in the backward pass so that they are
+better suited to the reduced precision. It is this combination of simulated quantization and weight adjustment that
+minimizes the accuracy loss incurred when quantizing. Note that quantization is only simulated at this stage and the
+backward passes of training are still performed in full floating-point precision.
+
+Importantly, you do not have to train your model from scratch to use quantization aware training. Instead, you can
+train the model normally (not quantization aware) and, after training is complete, fine-tune it using quantization
+aware training. Fine-tuning alone can save a lot of training time.
+
+As well as simulating quantization and adjusting weights, quantization aware training captures the ranges of the
+variable tensors so that the model can be fully quantized afterwards. Once quantization aware training is finished,
+the TensorFlow Lite converter is used to produce a fully quantized TensorFlow Lite model.
+
+If you are targeting an Arm Ethos-U55 NPU then the output TensorFlow Lite file will also need to be passed through the
+Vela compiler for further optimizations before it can be used.
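+
+A condensed sketch of the fine-tuning flow from
+[quantization_aware_training.py](./quantization_aware_training.py) is shown below; `trained_keras_model`, `x_train` and
+`y_train` are placeholders for your own model and training data:
+
+```python
+# Condensed sketch of quantization aware fine-tuning; see quantization_aware_training.py for the full example.
+import tensorflow as tf
+import tensorflow_model_optimization as tfmot
+
+# Wrap the trained model with fake quantization nodes. The weights are still floating point at this stage.
+quant_aware_model = tfmot.quantization.keras.quantize_model(trained_keras_model)
+quant_aware_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                          loss=tf.keras.losses.sparse_categorical_crossentropy,
+                          metrics=['accuracy'])
+quant_aware_model.fit(x=x_train, y=y_train, batch_size=128, epochs=1)  # a short fine-tuning run
+
+# The ranges captured during training are embedded in the model, so default
+# optimizations are enough for the converter to emit a fully quantized file.
+converter = tf.lite.TFLiteConverter.from_keras_model(quant_aware_model)
+converter.optimizations = [tf.lite.Optimize.DEFAULT]
+tflite_model = converter.convert()
+```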
+
+## Weight pruning
+
+After you have trained your deep learning model, it is common to find that many of its weights are 0 or very close
+to 0. These weights have very little effect on the network's calculations, so they can safely be removed, or 'pruned',
+from the model. This is accomplished by setting all of these weight values to 0, resulting in a sparse model.
+
+Compression algorithms can then take advantage of this to reduce the model size in memory, which can be very important
+when deploying on small embedded systems. Moreover, the Arm Ethos NPU can take advantage of model sparsity to further
+accelerate execution of a model.
+
+Training with weight pruning forces a certain percentage of your model's weights to be set (or 'pruned') to 0 during
+the training phase. This is done by forcing the weights that are closest to 0 to become 0. Pruning during training
+guarantees your model will have a certain level of sparsity, and the remaining weights can adapt better to the chosen
+sparsity level. This means that accuracy loss should be minimized even when a large pruning percentage is desired.
+
+Weight pruning can be further combined with quantization, giving a model that is both pruned and quantized and
+allowing the memory-saving effects of both to be combined. Quantization then allows the model to be used with the
+Arm Ethos NPU.
+
+If you are targeting an Arm Ethos-U55 NPU then the output TensorFlow Lite file will also need to be passed through the
+Vela compiler for further optimizations before it can be used.
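+
+A condensed sketch of the pruning flow from [weight_pruning.py](./weight_pruning.py) is shown below;
+`trained_keras_model`, `x_train` and `y_train` are placeholders for your own model and training data:
+
+```python
+# Condensed sketch of magnitude-based weight pruning; see weight_pruning.py for the full example.
+import tensorflow as tf
+import tensorflow_model_optimization as tfmot
+
+# Keep sparsity at a constant 75% throughout fine-tuning.
+pruning_schedule = tfmot.sparsity.keras.ConstantSparsity(target_sparsity=0.75, begin_step=0)
+pruned_model = tfmot.sparsity.keras.prune_low_magnitude(trained_keras_model, pruning_schedule=pruning_schedule)
+pruned_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                     loss=tf.keras.losses.sparse_categorical_crossentropy,
+                     metrics=['accuracy'])
+
+# The UpdatePruningStep callback must be passed to fit() when training a pruning-wrapped model.
+pruned_model.fit(x=x_train, y=y_train, batch_size=128, epochs=1,
+                 callbacks=[tfmot.sparsity.keras.UpdatePruningStep()])
+
+# Strip the pruning wrappers before quantizing and converting the model.
+model_for_export = tfmot.sparsity.keras.strip_pruning(pruned_model)
+```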
+
+## Weight clustering
+
+Another method of model conditioning is weight clustering (also called weight sharing). With this technique, a fixed
+number of values (cluster centers) are used in each layer of a model to represent all the possible values that the
+layer's weights take. The weights in a layer will then use the value of their closest cluster center. By restricting
+the number of possible clusters, weight clustering reduces the amount of memory needed to store all the weight values
+in a model.
+
+Depending on the model and the number of clusters chosen, using this kind of technique can have a negative effect on
+accuracy. To reduce the impact on accuracy, you can introduce clustering during training so that the model's weights
+can adjust better to the reduced precision.
+
+Weight clustering can be further combined with quantization, giving a model that is both clustered and quantized and
+allowing the memory-saving effects of both to be combined. Quantization then allows the model to be used with the
+Arm Ethos NPU.
+
+If you are targeting an Arm Ethos-U55 NPU then the output TensorFlow Lite file will also need to be passed through the
+Vela compiler for further optimizations before it can be used
+(see [Optimize model with Vela compiler](./building.md#optimize-custom-model-with-vela-compiler)).
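+
+A condensed sketch of the clustering flow from [weight_clustering.py](./weight_clustering.py) is shown below;
+`trained_keras_model`, `x_train` and `y_train` are placeholders for your own model and training data:
+
+```python
+# Condensed sketch of weight clustering; see weight_clustering.py for the full example.
+import tensorflow as tf
+import tensorflow_model_optimization as tfmot
+
+# Cluster the weights of every layer into 16 clusters with linearly spaced initial centroids.
+clustered_model = tfmot.clustering.keras.cluster_weights(
+    trained_keras_model,
+    number_of_clusters=16,
+    cluster_centroids_init=tfmot.clustering.keras.CentroidInitialization.LINEAR)
+clustered_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                        loss=tf.keras.losses.sparse_categorical_crossentropy,
+                        metrics=['accuracy'])
+clustered_model.fit(x=x_train, y=y_train, batch_size=128, epochs=1)  # fine-tune with clustering applied
+
+# Strip the clustering wrappers before quantizing and converting the model.
+model_for_export = tfmot.clustering.keras.strip_clustering(clustered_model)
+```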
+
+## References
+
+- [TensorFlow Model Optimization Toolkit](https://www.tensorflow.org/model_optimization)
+- [Post-training quantization](https://www.tensorflow.org/lite/performance/post_training_integer_quant)
+- [Quantization aware training](https://www.tensorflow.org/model_optimization/guide/quantization/training)
+- [Weight pruning](https://www.tensorflow.org/model_optimization/guide/pruning)
+- [Weight clustering](https://www.tensorflow.org/model_optimization/guide/clustering)
+- [Vela](https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/about/)
diff --git a/model_conditioning_examples/post_training_quantization.py b/model_conditioning_examples/post_training_quantization.py
new file mode 100644
index 0000000..ab535ac
--- /dev/null
+++ b/model_conditioning_examples/post_training_quantization.py
@@ -0,0 +1,139 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""
+This script will provide you with an example of how to perform post-training quantization in TensorFlow.
+
+The output from this example will be a TensorFlow Lite model file where weights and activations are quantized to 8bit
+integer values.
+
+Quantization helps reduce the size of your models and is necessary for running models on certain hardware such as Arm
+Ethos NPU.
+
+In addition to quantizing weights, post-training quantization uses a calibration dataset to
+capture the minimum and maximum values of all variable tensors in your model.
+By capturing these ranges it is possible to fully quantize not just the weights of the model but also the activations.
+
+Depending on the model you are quantizing there may be some accuracy loss, but for a lot of models the loss should
+be minimal.
+
+If you are targeting an Arm Ethos-U55 NPU then the output TensorFlow Lite file will also need to be passed through the
+Vela compiler for further optimizations before it can be used.
+
+For more information on using Vela see: https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/about/
+For more information on post-training quantization
+see: https://www.tensorflow.org/lite/performance/post_training_integer_quant
+"""
+import pathlib
+
+import numpy as np
+import tensorflow as tf
+
+from training_utils import get_data, create_model
+
+
+def post_training_quantize(keras_model, sample_data):
+    """Quantize Keras model using post-training quantization with some sample data.
+
+    The resulting TensorFlow Lite model will have fp32 inputs/outputs and will handle quantizing/dequantizing internally.
+
+    Args:
+        keras_model: Keras model to quantize.
+        sample_data: A numpy array of data to use as a representative dataset.
+
+    Returns:
+        Quantized TensorFlow Lite model.
+    """
+
+    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+
+    # We set the following converter options to ensure our model is fully quantized.
+    # An error will be thrown if there are any ops that can't be quantized.
+    converter.optimizations = [tf.lite.Optimize.DEFAULT]
+    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+
+    # To use post training quantization we must provide some sample data that will be used to
+    # calculate activation ranges for quantization. This data should be representative of the data
+    # we expect to feed the model and must be provided by a generator function.
+    def generate_repr_dataset():
+        for i in range(100):  # 100 samples is all we should need in this example.
+            yield [np.expand_dims(sample_data[i], axis=0)]
+
+    converter.representative_dataset = generate_repr_dataset
+    tflite_model = converter.convert()
+
+    return tflite_model
+
+
+def evaluate_tflite_model(tflite_save_path, x_test, y_test):
+    """Calculate the accuracy of a TensorFlow Lite model using TensorFlow Lite interpreter.
+
+    Args:
+        tflite_save_path: Path to TensorFlow Lite model to test.
+        x_test: numpy array of testing data.
+        y_test: numpy array of testing labels (sparse categorical).
+    """
+
+    interpreter = tf.lite.Interpreter(model_path=str(tflite_save_path))
+
+    interpreter.allocate_tensors()
+    input_details = interpreter.get_input_details()
+    output_details = interpreter.get_output_details()
+
+    accuracy_count = 0
+    num_test_images = len(y_test)
+
+    for i in range(num_test_images):
+        interpreter.set_tensor(input_details[0]['index'], x_test[i][np.newaxis, ...])
+        interpreter.invoke()
+        output_data = interpreter.get_tensor(output_details[0]['index'])
+
+        if np.argmax(output_data) == y_test[i]:
+            accuracy_count += 1
+
+    print(f"Test accuracy quantized: {accuracy_count / num_test_images:.3f}")
+
+
+def main():
+    x_train, y_train, x_test, y_test = get_data()
+    model = create_model()
+
+    # Compile and train the model in fp32 as normal.
+    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                  loss=tf.keras.losses.sparse_categorical_crossentropy,
+                  metrics=['accuracy'])
+
+    model.fit(x=x_train, y=y_train, batch_size=128, epochs=5, verbose=1, shuffle=True)
+
+    # Test the fp32 model accuracy.
+    test_loss, test_acc = model.evaluate(x_test, y_test)
+    print(f"Test accuracy float: {test_acc:.3f}")
+
+    # Quantize and export the resulting TensorFlow Lite model to file.
+    tflite_model = post_training_quantize(model, x_train)
+
+    tflite_models_dir = pathlib.Path('./conditioned_models/')
+    tflite_models_dir.mkdir(exist_ok=True, parents=True)
+
+    quant_model_save_path = tflite_models_dir / 'post_training_quant_model.tflite'
+    with open(quant_model_save_path, 'wb') as f:
+        f.write(tflite_model)
+
+    # Test the quantized model accuracy. Save time by only testing a subset of the whole data.
+    num_test_samples = 1000
+    evaluate_tflite_model(quant_model_save_path, x_test[0:num_test_samples], y_test[0:num_test_samples])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/model_conditioning_examples/quantization_aware_training.py b/model_conditioning_examples/quantization_aware_training.py
new file mode 100644
index 0000000..acb768c
--- /dev/null
+++ b/model_conditioning_examples/quantization_aware_training.py
@@ -0,0 +1,139 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""
+This script will provide you with a short example of how to perform quantization aware training in TensorFlow using the
+TensorFlow Model Optimization Toolkit.
+
+The output from this example will be a TensorFlow Lite model file where weights and activations are quantized to 8bit
+integer values.
+
+Quantization helps reduce the size of your models and is necessary for running models on certain hardware such as Arm
+Ethos NPU.
+
+In quantization aware training (QAT), the error introduced with quantizing from fp32 to int8 is simulated using
+fake quantization nodes. By simulating this quantization error when training, the model can learn better adapted
+weights and minimize accuracy losses caused by the reduced precision.
+
+Minimum and maximum values for activations are also captured during training so activations for every layer can be
+quantized along with the weights later.
+
+Quantization is only simulated during training and the training backward passes are still performed in full float
+precision. Actual quantization happens when generating a TensorFlow Lite model.
+
+If you are targeting an Arm Ethos-U55 NPU then the output TensorFlow Lite file will also need to be passed through the
+Vela compiler for further optimizations before it can be used.
+
+For more information on using Vela see: https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/about/
+For more information on quantization aware training
+see: https://www.tensorflow.org/model_optimization/guide/quantization/training
+"""
+import pathlib
+
+import numpy as np
+import tensorflow as tf
+import tensorflow_model_optimization as tfmot
+
+from training_utils import get_data, create_model
+
+
+def quantize_and_convert_to_tflite(keras_model):
+    """Quantize and convert Keras model trained with QAT to TensorFlow Lite.
+
+    The resulting TensorFlow Lite model will have fp32 inputs/outputs and will handle quantizing/dequantizing internally.
+
+    Args:
+        keras_model: Keras model trained with quantization aware training.
+
+    Returns:
+        Quantized TensorFlow Lite model.
+    """
+
+    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+
+    # After doing quantization aware training all the information for creating a fully quantized
+    # TensorFlow Lite model is already within the quantization aware Keras model.
+    # This means we only need to call convert with default optimizations to generate the quantized TensorFlow Lite model.
+    converter.optimizations = [tf.lite.Optimize.DEFAULT]
+    tflite_model = converter.convert()
+
+    return tflite_model
+
+
+def evaluate_tflite_model(tflite_save_path, x_test, y_test):
+    """Calculate the accuracy of a TensorFlow Lite model using TensorFlow Lite interpreter.
+
+    Args:
+        tflite_save_path: Path to TensorFlow Lite model to test.
+        x_test: numpy array of testing data.
+        y_test: numpy array of testing labels (sparse categorical).
+    """
+
+    interpreter = tf.lite.Interpreter(model_path=str(tflite_save_path))
+
+    interpreter.allocate_tensors()
+    input_details = interpreter.get_input_details()
+    output_details = interpreter.get_output_details()
+
+    accuracy_count = 0
+    num_test_images = len(y_test)
+
+    for i in range(num_test_images):
+        interpreter.set_tensor(input_details[0]['index'], x_test[i][np.newaxis, ...])
+        interpreter.invoke()
+        output_data = interpreter.get_tensor(output_details[0]['index'])
+
+        if np.argmax(output_data) == y_test[i]:
+            accuracy_count += 1
+
+    print(f"Test accuracy quantized: {accuracy_count / num_test_images:.3f}")
+
+
+def main():
+    x_train, y_train, x_test, y_test = get_data()
+    model = create_model()
+
+    # When working with the TensorFlow Keras API and the TF Model Optimization Toolkit we can make our
+    # model quantization aware in one line. Once this is done we compile the model and train as normal.
+    # It is important to note that the model is only quantization aware and is not quantized yet. The weights are
+    # still floating point and will only be converted to int8 when we generate the TensorFlow Lite model later on.
+    quant_aware_model = tfmot.quantization.keras.quantize_model(model)
+
+    quant_aware_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                              loss=tf.keras.losses.sparse_categorical_crossentropy,
+                              metrics=['accuracy'])
+
+    quant_aware_model.fit(x=x_train, y=y_train, batch_size=128, epochs=5, verbose=1, shuffle=True)
+
+    # Test the quantization aware model accuracy.
+    test_loss, test_acc = quant_aware_model.evaluate(x_test, y_test)
+    print(f"Test accuracy quant aware: {test_acc:.3f}")
+
+    # Quantize and save the resulting TensorFlow Lite model to file.
+    tflite_model = quantize_and_convert_to_tflite(quant_aware_model)
+
+    tflite_models_dir = pathlib.Path('./conditioned_models/')
+    tflite_models_dir.mkdir(exist_ok=True, parents=True)
+
+    quant_model_save_path = tflite_models_dir / 'qat_quant_model.tflite'
+    with open(quant_model_save_path, 'wb') as f:
+        f.write(tflite_model)
+
+    # Test quantized model accuracy. Save time by only testing a subset of the whole data.
+    num_test_samples = 1000
+    evaluate_tflite_model(quant_model_save_path, x_test[0:num_test_samples], y_test[0:num_test_samples])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/model_conditioning_examples/requirements.txt b/model_conditioning_examples/requirements.txt
new file mode 100644
index 0000000..96e15a3
--- /dev/null
+++ b/model_conditioning_examples/requirements.txt
@@ -0,0 +1,3 @@
+tensorflow==2.4.0
+tensorflow-model-optimization==0.5.0
+numpy==1.19.5
\ No newline at end of file
diff --git a/model_conditioning_examples/setup.sh b/model_conditioning_examples/setup.sh
new file mode 100644
index 0000000..f552662
--- /dev/null
+++ b/model_conditioning_examples/setup.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+python3 -m venv ./env
+source ./env/bin/activate
+pip install -U pip
+pip install -r requirements.txt
\ No newline at end of file
diff --git a/model_conditioning_examples/training_utils.py b/model_conditioning_examples/training_utils.py
new file mode 100644
index 0000000..3467b2a
--- /dev/null
+++ b/model_conditioning_examples/training_utils.py
@@ -0,0 +1,61 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""
+Utility functions related to data and models that are common to all the model conditioning examples.
+"""
+import tensorflow as tf
+import numpy as np
+
+
+def get_data():
+    """Downloads and returns the pre-processed data and labels for training and testing.
+
+    Returns:
+        Tuple of: (train data, train labels, test data, test labels)
+    """
+
+    # To save time we use the MNIST dataset for this example.
+    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
+
+    # Convolution operations require data to have 4 dimensions.
+    # We divide by 255 to help training and cast to float32 for TensorFlow.
+    x_train = (x_train[..., np.newaxis] / 255.0).astype(np.float32)
+    x_test = (x_test[..., np.newaxis] / 255.0).astype(np.float32)
+
+    return x_train, y_train, x_test, y_test
+
+
+def create_model():
+    """Create and returns a simple Keras model for training MNIST.
+
+    We use a simple convolutional neural network for this example, but the model
+    optimization methods employed should be compatible with a wide variety of CNN
+    architectures such as MobileNet and Inception.
+
+    Returns:
+        Uncompiled Keras model.
+    """
+
+    keras_model = tf.keras.models.Sequential([
+        tf.keras.layers.Conv2D(32, 3, padding='same', input_shape=(28, 28, 1), activation=tf.nn.relu),
+        tf.keras.layers.Conv2D(32, 3, padding='same', activation=tf.nn.relu),
+        tf.keras.layers.MaxPool2D(),
+        tf.keras.layers.Conv2D(32, 3, padding='same', activation=tf.nn.relu),
+        tf.keras.layers.MaxPool2D(),
+        tf.keras.layers.Flatten(),
+        tf.keras.layers.Dense(units=10, activation=tf.nn.softmax)
+    ])
+
+    return keras_model
diff --git a/model_conditioning_examples/weight_clustering.py b/model_conditioning_examples/weight_clustering.py
new file mode 100644
index 0000000..54f241c
--- /dev/null
+++ b/model_conditioning_examples/weight_clustering.py
@@ -0,0 +1,107 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""
+This script will provide you with a short example of how to perform clustering of weights (weight sharing) in
+TensorFlow using the TensorFlow Model Optimization Toolkit.
+
+The output from this example will be a TensorFlow Lite model file where weights in each layer have been 'clustered' into
+16 clusters during training - quantization has then been applied on top of this.
+
+By clustering the model we can improve compression of the model file. This can be essential for deploying certain
+models on systems with limited resources - such as embedded systems using an Arm Ethos NPU.
+
+After performing clustering we do post-training quantization to quantize the model and then generate a TensorFlow Lite file.
+
+If you are targeting an Arm Ethos-U55 NPU then the output TensorFlow Lite file will also need to be passed through the
+Vela compiler for further optimizations before it can be used.
+
+For more information on using Vela see: https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/about/
+For more information on clustering see: https://www.tensorflow.org/model_optimization/guide/clustering
+"""
+import pathlib
+
+import tensorflow as tf
+import tensorflow_model_optimization as tfmot
+
+from training_utils import get_data, create_model
+from post_training_quantization import post_training_quantize, evaluate_tflite_model
+
+
+def prepare_for_clustering(keras_model):
+    """Prepares a Keras model for clustering."""
+
+    # Choose the number of clusters to use and how to initialize them. Using more clusters will generally
+    # reduce accuracy so you will need to find the optimal number for your use-case.
+    number_of_clusters = 16
+    cluster_centroids_init = tfmot.clustering.keras.CentroidInitialization.LINEAR
+
+    # Apply the clustering wrapper to the whole model so weights in every layer will get clustered. You may find that
+    # to avoid too much accuracy loss only certain non-critical layers in your model should be clustered.
+    clustering_ready_model = tfmot.clustering.keras.cluster_weights(keras_model,
+                                                                    number_of_clusters=number_of_clusters,
+                                                                    cluster_centroids_init=cluster_centroids_init)
+
+    # We must recompile the model after making it ready for clustering.
+    clustering_ready_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                                   loss=tf.keras.losses.sparse_categorical_crossentropy,
+                                   metrics=['accuracy'])
+
+    return clustering_ready_model
+
+
+def main():
+    x_train, y_train, x_test, y_test = get_data()
+    model = create_model()
+
+    # Compile and train the model first.
+    # In general it is easier to do clustering as a fine-tuning step after the model is fully trained.
+    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                  loss=tf.keras.losses.sparse_categorical_crossentropy,
+                  metrics=['accuracy'])
+
+    model.fit(x=x_train, y=y_train, batch_size=128, epochs=5, verbose=1, shuffle=True)
+
+    # Test the trained model accuracy.
+    test_loss, test_acc = model.evaluate(x_test, y_test)
+    print(f"Test accuracy before clustering: {test_acc:.3f}")
+
+    # Prepare the model for clustering.
+    clustered_model = prepare_for_clustering(model)
+
+    # Continue training the model but now with clustering applied.
+    clustered_model.fit(x=x_train, y=y_train, batch_size=128, epochs=1, verbose=1, shuffle=True)
+    test_loss, test_acc = clustered_model.evaluate(x_test, y_test)
+    print(f"Test accuracy after clustering: {test_acc:.3f}")
+
+    # Remove all variables that clustering only needed in the training phase.
+    model_for_export = tfmot.clustering.keras.strip_clustering(clustered_model)
+
+    # Apply post-training quantization on top of the clustering and save the resulting TensorFlow Lite model to file.
+    tflite_model = post_training_quantize(model_for_export, x_train)
+
+    tflite_models_dir = pathlib.Path('./conditioned_models/')
+    tflite_models_dir.mkdir(exist_ok=True, parents=True)
+
+    clustered_quant_model_save_path = tflite_models_dir / 'clustered_post_training_quant_model.tflite'
+    with open(clustered_quant_model_save_path, 'wb') as f:
+        f.write(tflite_model)
+
+    # Test the clustered quantized model accuracy. Save time by only testing a subset of the whole data.
+    num_test_samples = 1000
+    evaluate_tflite_model(clustered_quant_model_save_path, x_test[0:num_test_samples], y_test[0:num_test_samples])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/model_conditioning_examples/weight_pruning.py b/model_conditioning_examples/weight_pruning.py
new file mode 100644
index 0000000..bf26f1f
--- /dev/null
+++ b/model_conditioning_examples/weight_pruning.py
@@ -0,0 +1,106 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""
+This script will provide you with a short example of how to perform magnitude-based weight pruning in TensorFlow
+using the TensorFlow Model Optimization Toolkit.
+
+The output from this example will be a TensorFlow Lite model file where ~75% of the weights have been 'pruned' to the
+value 0 during training - quantization has then been applied on top of this.
+
+By pruning the model we can improve compression of the model file. This can be essential for deploying certain models
+on systems with limited resources - such as embedded systems using Arm Ethos NPU. Also, if the pruned model is run
+on an Arm Ethos NPU then this pruning can improve the execution time of the model.
+
+After pruning is complete we do post-training quantization to quantize the model and then generate a TensorFlow Lite file.
+
+If you are targeting an Arm Ethos-U55 NPU then the output TensorFlow Lite file will also need to be passed through the
+Vela compiler for further optimizations before it can be used.
+
+For more information on using Vela see: https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/about/
+For more information on weight pruning see: https://www.tensorflow.org/model_optimization/guide/pruning
+"""
+import pathlib
+
+import tensorflow as tf
+import tensorflow_model_optimization as tfmot
+
+from training_utils import get_data, create_model
+from post_training_quantization import post_training_quantize, evaluate_tflite_model
+
+
+def prepare_for_pruning(keras_model):
+    """Prepares a Keras model for pruning."""
+
+    # We use a constant sparsity schedule so the amount of sparsity in the model is kept at the same percent throughout
+    # training. An alternative is PolynomialDecay where sparsity can be gradually increased during training.
+    pruning_schedule = tfmot.sparsity.keras.ConstantSparsity(target_sparsity=0.75, begin_step=0)
+
+    # Apply the pruning wrapper to the whole model so weights in every layer will get pruned. You may find that to avoid
+    # too much accuracy loss only certain non-critical layers in your model should be pruned.
+    pruning_ready_model = tfmot.sparsity.keras.prune_low_magnitude(keras_model, pruning_schedule=pruning_schedule)
+
+    # We must recompile the model after making it ready for pruning.
+    pruning_ready_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                                loss=tf.keras.losses.sparse_categorical_crossentropy,
+                                metrics=['accuracy'])
+
+    return pruning_ready_model
+
+
+def main():
+    x_train, y_train, x_test, y_test = get_data()
+    model = create_model()
+
+    # Compile and train the model first.
+    # In general it is easier to do pruning as a fine-tuning step after the model is fully trained.
+    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                  loss=tf.keras.losses.sparse_categorical_crossentropy,
+                  metrics=['accuracy'])
+
+    model.fit(x=x_train, y=y_train, batch_size=128, epochs=5, verbose=1, shuffle=True)
+
+    # Test the trained model accuracy.
+    test_loss, test_acc = model.evaluate(x_test, y_test)
+    print(f"Test accuracy before pruning: {test_acc:.3f}")
+
+    # Prepare the model for pruning and add the pruning update callback needed in training.
+    pruned_model = prepare_for_pruning(model)
+    callbacks = [tfmot.sparsity.keras.UpdatePruningStep()]
+
+    # Continue training the model but now with pruning applied - remember to pass in the callbacks!
+    pruned_model.fit(x=x_train, y=y_train, batch_size=128, epochs=1, verbose=1, shuffle=True, callbacks=callbacks)
+    test_loss, test_acc = pruned_model.evaluate(x_test, y_test)
+    print(f"Test accuracy after pruning: {test_acc:.3f}")
+
+    # Remove all variables that pruning only needed in the training phase.
+    model_for_export = tfmot.sparsity.keras.strip_pruning(pruned_model)
+
+    # Apply post-training quantization on top of the pruning and save the resulting TensorFlow Lite model to file.
+    tflite_model = post_training_quantize(model_for_export, x_train)
+
+    tflite_models_dir = pathlib.Path('./conditioned_models/')
+    tflite_models_dir.mkdir(exist_ok=True, parents=True)
+
+    pruned_quant_model_save_path = tflite_models_dir / 'pruned_post_training_quant_model.tflite'
+    with open(pruned_quant_model_save_path, 'wb') as f:
+        f.write(tflite_model)
+
+    # Test the pruned quantized model accuracy. Save time by only testing a subset of the whole data.
+    num_test_samples = 1000
+    evaluate_tflite_model(pruned_quant_model_save_path, x_test[0:num_test_samples], y_test[0:num_test_samples])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/release_notes.txt b/release_notes.txt
new file mode 100644
index 0000000..13c40a7
--- /dev/null
+++ b/release_notes.txt
@@ -0,0 +1,37 @@
+Changes in 21.03
+    * simple platform support added
+    * model conditioning examples added
+    * documentation updated
+    * build changed to use sources of the dependency libraries
+    * tests for native platform added
+    * anomaly detection use case added
+
+Changes in 20.11
+    * SSE-200 and SSE-300 system support was added.
+    * Support for simple fixed virtual platform for Ethos-U55 and Cortex-M55 removed.
+    * Build cmake parameters changed: TARGET_SUBSYSTEM was added, TARGET_PLATFORM accepted values were changed.
+    * Models with multiple output tensors support was added.
+    * Generic inference runner use-case added.
+    * ASR triggered by KWS added in the same use case (kws_asr). This also shows how to re-use tensor arena with two models using the same memory pool.
+
+Changes in 20.09 release:
+    * Support for TensorFlow Lite Micro version > 2.3.0 (tested with TensorFlow Lite Micro 2.4.0 commit hash: 5bbb8a2bd1def6865b1510175a3da5fd12387e10)
+    * Added speech recognition use case example.
+    * Updated Ethos-U55 Fastmodel version to r0p2-00eac0-rc4
+
+Changes in 20.08 release:
+    * Added keyword spotting use case example.
+    * Added person detection use case example.
+Known issues:
+    * telnet connection to FastModel environment may hang after some period of inactivity.
+
+Changes in 20.05 release:
+    * FastModel environment was built with FastModel Tools v11.10.22.
+    * MPS3 FPGA build support was added.
+    * Configurable timing-adaptor.
+    * Added Active and Idle cycle counts for NPU and CPU profiling report.
+    * Source code structure and build scripts refactored to support multiple ML use-cases.
+    * Used EAC Ethos-U55 software model and drivers.
+    * Windows support for build scripts.
+Known issues:
+    * telnet connection to FastModel environment may hang after some period of inactivity.
\ No newline at end of file
diff --git a/resources/LICENSE_CC_1.0.txt b/resources/LICENSE_CC_1.0.txt
new file mode 100644
index 0000000..d727803
--- /dev/null
+++ b/resources/LICENSE_CC_1.0.txt
@@ -0,0 +1,51 @@
+Creative Commons Attribution 1.0
+
+=======================================================================
+
+CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DRAFT LICENSE DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM ITS USE.
+
+License
+
+THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE IS PROHIBITED.
+
+BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
+
+    1. Definitions
+        a. "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License.
+        b. "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License.
+        c. "Licensor" means the individual or entity that offers the Work under the terms of this License.
+        d. "Original Author" means the individual or entity who created the Work.
+        e. "Work" means the copyrightable work of authorship offered under the terms of this License.
+        f. "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation.
+    2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws.
+    3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below:
+        a. to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works;
+        b. to create and reproduce Derivative Works;
+        c. to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works;
+        d. to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works;
+
+        The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved.
+    4. Restrictions. The license granted in Section 3 above is expressly made subject to and limited by the following restrictions:
+        a. You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any reference to such Licensor or the Original Author, as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any reference to such Licensor or the Original Author, as requested.
+        b. If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and give the Original Author credit reasonable to the medium or means You are utilizing by conveying the name (or pseudonym if applicable) of the Original Author if supplied; the title of the Work if supplied; in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit.
+    5. Representations, Warranties and Disclaimer
+        a. By offering the Work for public release under this License, Licensor represents and warrants that, to the best of Licensor's knowledge after reasonable inquiry:
+            i. Licensor has secured all rights in the Work necessary to grant the license rights hereunder and to permit the lawful exercise of the rights granted hereunder without You having any obligation to pay any royalties, compulsory license fees, residuals or any other payments;
+            ii. The Work does not infringe the copyright, trademark, publicity rights, common law rights or any other right of any third party or constitute defamation, invasion of privacy or other tortious injury to any third party.
+        b. EXCEPT AS EXPRESSLY STATED IN THIS LICENSE OR OTHERWISE AGREED IN WRITING OR REQUIRED BY APPLICABLE LAW, THE WORK IS LICENSED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES REGARDING THE CONTENTS OR ACCURACY OF THE WORK.
+    6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, AND EXCEPT FOR DAMAGES ARISING FROM LIABILITY TO A THIRD PARTY RESULTING FROM BREACH OF THE WARRANTIES IN SECTION 5, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+    7. Termination
+        a. This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License.
+        b. Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above.
+    8. Miscellaneous
+        a. Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License.
+        b. Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License.
+        c. If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
+        d. No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent.
+        e. This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You.
+
+Creative Commons is not a party to this License, and makes no warranty whatsoever in connection with the Work. Creative Commons will not be liable to You or any party on any legal theory for any damages whatsoever, including without limitation any general, special, incidental or consequential damages arising in connection to this license. Notwithstanding the foregoing two (2) sentences, if Creative Commons has expressly identified itself as the Licensor hereunder, it shall have all rights and obligations of Licensor.
+
+Except for the limited purpose of indicating to the public that the Work is licensed under the CCPL, neither party will use the trademark "Creative Commons" or any related trademark or logo of Creative Commons without the prior written consent of Creative Commons. Any permitted use will be in compliance with Creative Commons' then-current trademark usage guidelines, as may be published on its website or otherwise made available upon request from time to time.
+
+Creative Commons may be contacted at http://creativecommons.org/.
diff --git a/resources/LICENSE_CC_4.0.txt b/resources/LICENSE_CC_4.0.txt
new file mode 100644
index 0000000..b877574
--- /dev/null
+++ b/resources/LICENSE_CC_4.0.txt
@@ -0,0 +1,397 @@
+Creative Commons
+
+Attribution 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+     Considerations for licensors: Our public licenses are
+     intended for use by those authorized to give the public
+     permission to use material in ways otherwise restricted by
+     copyright and certain other rights. Our licenses are
+     irrevocable. Licensors should read and understand the terms
+     and conditions of the license they choose before applying it.
+     Licensors should also secure all rights necessary before
+     applying our licenses so that the public can reuse the
+     material as expected. Licensors should clearly mark any
+     material not subject to the license. This includes other CC-
+     licensed material, or material used under an exception or
+     limitation to copyright. More considerations for licensors:
+   wiki.creativecommons.org/Considerations_for_licensors
+
+     Considerations for the public: By using one of our public
+     licenses, a licensor grants the public permission to use the
+     licensed material under specified terms and conditions. If
+     the licensor's permission is not necessary for any reason--for
+     example, because of any applicable exception or limitation to
+     copyright--then that use is not regulated by the license. Our
+     licenses grant only permissions under copyright and certain
+     other rights that a licensor has authority to grant. Use of
+     the licensed material may still be restricted for other
+     reasons, including because others have copyright or other
+     rights in the material. A licensor may make special requests,
+     such as asking that all changes be marked or described.
+     Although not required by our licenses, you are encouraged to
+     respect those requests where reasonable. More_considerations
+     for the public:
+   wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution 4.0 International Public License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution 4.0 International Public License ("Public License"). To the
+extent this Public License may be interpreted as a contract, You are
+granted the Licensed Rights in consideration of Your acceptance of
+these terms and conditions, and the Licensor grants You such rights in
+consideration of benefits the Licensor receives from making the
+Licensed Material available under these terms and conditions.
+
+
+Section 1 -- Definitions.
+
+  a. Adapted Material means material subject to Copyright and Similar
+     Rights that is derived from or based upon the Licensed Material
+     and in which the Licensed Material is translated, altered,
+     arranged, transformed, or otherwise modified in a manner requiring
+     permission under the Copyright and Similar Rights held by the
+     Licensor. For purposes of this Public License, where the Licensed
+     Material is a musical work, performance, or sound recording,
+     Adapted Material is always produced where the Licensed Material is
+     synched in timed relation with a moving image.
+
+  b. Adapter's License means the license You apply to Your Copyright
+     and Similar Rights in Your contributions to Adapted Material in
+     accordance with the terms and conditions of this Public License.
+
+  c. Copyright and Similar Rights means copyright and/or similar rights
+     closely related to copyright including, without limitation,
+     performance, broadcast, sound recording, and Sui Generis Database
+     Rights, without regard to how the rights are labeled or
+     categorized. For purposes of this Public License, the rights
+     specified in Section 2(b)(1)-(2) are not Copyright and Similar
+     Rights.
+
+  d. Effective Technological Measures means those measures that, in the
+     absence of proper authority, may not be circumvented under laws
+     fulfilling obligations under Article 11 of the WIPO Copyright
+     Treaty adopted on December 20, 1996, and/or similar international
+     agreements.
+
+  e. Exceptions and Limitations means fair use, fair dealing, and/or
+     any other exception or limitation to Copyright and Similar Rights
+     that applies to Your use of the Licensed Material.
+
+  f. Licensed Material means the artistic or literary work, database,
+     or other material to which the Licensor applied this Public
+     License.
+
+  g. Licensed Rights means the rights granted to You subject to the
+     terms and conditions of this Public License, which are limited to
+     all Copyright and Similar Rights that apply to Your use of the
+     Licensed Material and that the Licensor has authority to license.
+
+  h. Licensor means the individual(s) or entity(ies) granting rights
+     under this Public License.
+
+  i. Share means to provide material to the public by any means or
+     process that requires permission under the Licensed Rights, such
+     as reproduction, public display, public performance, distribution,
+     dissemination, communication, or importation, and to make material
+     available to the public including in ways that members of the
+     public may access the material from a place and at a time
+     individually chosen by them.
+
+  j. Sui Generis Database Rights means rights other than copyright
+     resulting from Directive 96/9/EC of the European Parliament and of
+     the Council of 11 March 1996 on the legal protection of databases,
+     as amended and/or succeeded, as well as other essentially
+     equivalent rights anywhere in the world.
+
+  k. You means the individual or entity exercising the Licensed Rights
+     under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+  a. License grant.
+
+       1. Subject to the terms and conditions of this Public License,
+          the Licensor hereby grants You a worldwide, royalty-free,
+          non-sublicensable, non-exclusive, irrevocable license to
+          exercise the Licensed Rights in the Licensed Material to:
+
+            a. reproduce and Share the Licensed Material, in whole or
+               in part; and
+
+            b. produce, reproduce, and Share Adapted Material.
+
+       2. Exceptions and Limitations. For the avoidance of doubt, where
+          Exceptions and Limitations apply to Your use, this Public
+          License does not apply, and You do not need to comply with
+          its terms and conditions.
+
+       3. Term. The term of this Public License is specified in Section
+          6(a).
+
+       4. Media and formats; technical modifications allowed. The
+          Licensor authorizes You to exercise the Licensed Rights in
+          all media and formats whether now known or hereafter created,
+          and to make technical modifications necessary to do so. The
+          Licensor waives and/or agrees not to assert any right or
+          authority to forbid You from making technical modifications
+          necessary to exercise the Licensed Rights, including
+          technical modifications necessary to circumvent Effective
+          Technological Measures. For purposes of this Public License,
+          simply making modifications authorized by this Section 2(a)
+          (4) never produces Adapted Material.
+
+       5. Downstream recipients.
+
+            a. Offer from the Licensor -- Licensed Material. Every
+               recipient of the Licensed Material automatically
+               receives an offer from the Licensor to exercise the
+               Licensed Rights under the terms and conditions of this
+               Public License.
+
+            b. No downstream restrictions. You may not offer or impose
+               any additional or different terms or conditions on, or
+               apply any Effective Technological Measures to, the
+               Licensed Material if doing so restricts exercise of the
+               Licensed Rights by any recipient of the Licensed
+               Material.
+
+       6. No endorsement. Nothing in this Public License constitutes or
+          may be construed as permission to assert or imply that You
+          are, or that Your use of the Licensed Material is, connected
+          with, or sponsored, endorsed, or granted official status by,
+          the Licensor or others designated to receive attribution as
+          provided in Section 3(a)(1)(A)(i).
+
+  b. Other rights.
+
+       1. Moral rights, such as the right of integrity, are not
+          licensed under this Public License, nor are publicity,
+          privacy, and/or other similar personality rights; however, to
+          the extent possible, the Licensor waives and/or agrees not to
+          assert any such rights held by the Licensor to the limited
+          extent necessary to allow You to exercise the Licensed
+          Rights, but not otherwise.
+
+       2. Patent and trademark rights are not licensed under this
+          Public License.
+
+       3. To the extent possible, the Licensor waives any right to
+          collect royalties from You for the exercise of the Licensed
+          Rights, whether directly or through a collecting society
+          under any voluntary or waivable statutory or compulsory
+          licensing scheme. In all other cases the Licensor expressly
+          reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+  a. Attribution.
+
+       1. If You Share the Licensed Material (including in modified
+          form), You must:
+
+            a. retain the following if it is supplied by the Licensor
+               with the Licensed Material:
+
+                 i. identification of the creator(s) of the Licensed
+                    Material and any others designated to receive
+                    attribution, in any reasonable manner requested by
+                    the Licensor (including by pseudonym if
+                    designated);
+
+                ii. a copyright notice;
+
+               iii. a notice that refers to this Public License;
+
+                iv. a notice that refers to the disclaimer of
+                    warranties;
+
+                 v. a URI or hyperlink to the Licensed Material to the
+                    extent reasonably practicable;
+
+            b. indicate if You modified the Licensed Material and
+               retain an indication of any previous modifications; and
+
+            c. indicate the Licensed Material is licensed under this
+               Public License, and include the text of, or the URI or
+               hyperlink to, this Public License.
+
+       2. You may satisfy the conditions in Section 3(a)(1) in any
+          reasonable manner based on the medium, means, and context in
+          which You Share the Licensed Material. For example, it may be
+          reasonable to satisfy the conditions by providing a URI or
+          hyperlink to a resource that includes the required
+          information.
+
+       3. If requested by the Licensor, You must remove any of the
+          information required by Section 3(a)(1)(A) to the extent
+          reasonably practicable.
+
+       4. If You Share Adapted Material You produce, the Adapter's
+          License You apply must not prevent recipients of the Adapted
+          Material from complying with this Public License.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+  a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+     to extract, reuse, reproduce, and Share all or a substantial
+     portion of the contents of the database;
+
+  b. if You include all or a substantial portion of the database
+     contents in a database in which You have Sui Generis Database
+     Rights, then the database in which You have Sui Generis Database
+     Rights (but not its individual contents) is Adapted Material; and
+
+  c. You must comply with the conditions in Section 3(a) if You Share
+     all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+  a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+     EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+     AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+     ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+     IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+     WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+     PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+     ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+     KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+     ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+  b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+     TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+     NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+     INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+     COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+     USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+     ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+     DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+     IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+  c. The disclaimer of warranties and limitation of liability provided
+     above shall be interpreted in a manner that, to the extent
+     possible, most closely approximates an absolute disclaimer and
+     waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+  a. This Public License applies for the term of the Copyright and
+     Similar Rights licensed here. However, if You fail to comply with
+     this Public License, then Your rights under this Public License
+     terminate automatically.
+
+  b. Where Your right to use the Licensed Material has terminated under
+     Section 6(a), it reinstates:
+
+       1. automatically as of the date the violation is cured, provided
+          it is cured within 30 days of Your discovery of the
+          violation; or
+
+       2. upon express reinstatement by the Licensor.
+
+     For the avoidance of doubt, this Section 6(b) does not affect any
+     right the Licensor may have to seek remedies for Your violations
+     of this Public License.
+
+  c. For the avoidance of doubt, the Licensor may also offer the
+     Licensed Material under separate terms or conditions or stop
+     distributing the Licensed Material at any time; however, doing so
+     will not terminate this Public License.
+
+  d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+     License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+  a. The Licensor shall not be bound by any additional or different
+     terms or conditions communicated by You unless expressly agreed.
+
+  b. Any arrangements, understandings, or agreements regarding the
+     Licensed Material not stated herein are separate from and
+     independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+  a. For the avoidance of doubt, this Public License does not, and
+     shall not be interpreted to, reduce, limit, restrict, or impose
+     conditions on any use of the Licensed Material that could lawfully
+     be made without permission under this Public License.
+
+  b. To the extent possible, if any provision of this Public License is
+     deemed unenforceable, it shall be automatically reformed to the
+     minimum extent necessary to make it enforceable. If the provision
+     cannot be reformed, it shall be severed from this Public License
+     without affecting the enforceability of the remaining terms and
+     conditions.
+
+  c. No term or condition of this Public License will be waived and no
+     failure to comply consented to unless expressly agreed to by the
+     Licensor.
+
+  d. Nothing in this Public License constitutes or may be interpreted
+     as a limitation upon, or waiver of, any privileges and immunities
+     that apply to the Licensor or You, including from the legal
+     processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public
+licenses. Notwithstanding, Creative Commons may elect to apply one of
+its public licenses to material it publishes and in those instances
+will be considered the “Licensor.” The text of the Creative Commons
+public licenses is dedicated to the public domain under the CC0 Public
+Domain Dedication. Except for the limited purpose of indicating that
+material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the
+public licenses.
+
+Creative Commons may be contacted at creativecommons.org.
\ No newline at end of file
diff --git a/resources/ad/labels/placeholder.txt b/resources/ad/labels/placeholder.txt
new file mode 100644
index 0000000..b3a4252
--- /dev/null
+++ b/resources/ad/labels/placeholder.txt
@@ -0,0 +1 @@
+placeholder
\ No newline at end of file
diff --git a/resources/ad/samples/files.md b/resources/ad/samples/files.md
new file mode 100644
index 0000000..95b1479
--- /dev/null
+++ b/resources/ad/samples/files.md
@@ -0,0 +1,17 @@
+# Sample wav audio clip
+
+For this use case, sample audio clips are not provided.
+
+The data used for this application sample comes from
+[https://zenodo.org/record/3384388\#.X6GILFNKiqA](https://zenodo.org/record/3384388\#.X6GILFNKiqA)
+and the model included in this example is trained on the ‘Slider’ part of the dataset.
+
+The machine ID (00, 02, 04 or 06) that the clip comes from must be present in the file name for the application to work.
+
+The file name should match a pattern such as
+`<any>_<text>_00_<here>.wav` if the audio came from machine ID 00,
+or `<any>_<text>_02_<here>.wav` if it came from machine ID 02, and so on.
+For example:
+
+- `anomaly_id_00_00000000.wav`
+- `normal_id_00_00000004.wav`
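+
+As a minimal, illustrative sketch only (not part of the application), the machine ID could be recovered from a file name that follows this pattern with a small Python helper; the regular expression below is an assumption based on the examples above.
+
+```python
+import re
+from typing import Optional
+
+# Hypothetical helper: the machine ID (00, 02, 04 or 06) is expected to appear
+# as an underscore-separated field, e.g. anomaly_id_00_00000000.wav.
+MACHINE_ID_PATTERN = re.compile(r"_(00|02|04|06)_")
+
+def machine_id_from_filename(name: str) -> Optional[str]:
+    """Return the machine ID embedded in a clip's file name, or None if absent."""
+    match = MACHINE_ID_PATTERN.search(name)
+    return match.group(1) if match else None
+
+print(machine_id_from_filename("anomaly_id_00_00000000.wav"))  # -> 00
+print(machine_id_from_filename("normal_id_00_00000004.wav"))   # -> 00
+```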
diff --git a/resources/asr/labels/labels_wav2letter.txt b/resources/asr/labels/labels_wav2letter.txt
new file mode 100644
index 0000000..8fb2fc8
--- /dev/null
+++ b/resources/asr/labels/labels_wav2letter.txt
@@ -0,0 +1,29 @@
+a
+b
+c
+d
+e
+f
+g
+h
+i
+j
+k
+l
+m
+n
+o
+p
+q
+r
+s
+t
+u
+v
+w
+x
+y
+z
+'
+ 
+$
\ No newline at end of file
diff --git a/resources/asr/samples/anotherdoor.wav b/resources/asr/samples/anotherdoor.wav
new file mode 100644
index 0000000..ee08f06
--- /dev/null
+++ b/resources/asr/samples/anotherdoor.wav
Binary files differ
diff --git a/resources/asr/samples/anotherengineer.wav b/resources/asr/samples/anotherengineer.wav
new file mode 100644
index 0000000..36faef8
--- /dev/null
+++ b/resources/asr/samples/anotherengineer.wav
Binary files differ
diff --git a/resources/asr/samples/files.md b/resources/asr/samples/files.md
new file mode 100644
index 0000000..03b988b
--- /dev/null
+++ b/resources/asr/samples/files.md
@@ -0,0 +1,17 @@
+# Sample wav audio clip
+
+The sample wav audio clips provided are under a Creative Commons License (Creative Commons Attribution 4.0 International Public License).
+The source is the Librispeech ASR Corpus (http://www.openslr.org/12/); the files were converted from flac to wav (a conversion sketch is shown after the list). The files used are listed here for traceability:
+
+- testingroutine.wav (orig - 251-137823-0002.flac)
+  - "This isn't part of your testing routine is it"
+- anotherengineer.wav (orig - 251-137823-0003.flac)
+  - "Another engineer rushed toward the door to see what was happening outside"
+- anotherdoor.wav (orig - 3536-23268-0010.flac)
+  - "And he walked immediately out of the apartment by another door"
+- itellyou.wav (orig - 251-118436-0001.flac)
+  - "I tell you it is not poison she cried"
+
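+As a rough illustration (the exact settings used for the provided files are not documented here), a flac clip from the corpus could be converted to wav with a short Python script; the sketch below assumes the third-party `soundfile` package is installed.
+
+```python
+import soundfile as sf
+
+def flac_to_wav(flac_path: str, wav_path: str) -> None:
+    """Read a flac clip and re-write it as wav, keeping sample rate and channels."""
+    data, sample_rate = sf.read(flac_path)
+    sf.write(wav_path, data, sample_rate)
+
+# e.g. reproduce one of the conversions listed above (illustrative only):
+flac_to_wav("251-137823-0002.flac", "testingroutine.wav")
+```
+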
+## License
+
+[Creative Commons Attribution 4.0 International Public License](../../LICENSE_CC_4.0.txt).
diff --git a/resources/asr/samples/itellyou.wav b/resources/asr/samples/itellyou.wav
new file mode 100644
index 0000000..001ce80
--- /dev/null
+++ b/resources/asr/samples/itellyou.wav
Binary files differ
diff --git a/resources/asr/samples/testingroutine.wav b/resources/asr/samples/testingroutine.wav
new file mode 100644
index 0000000..0d8da6b
--- /dev/null
+++ b/resources/asr/samples/testingroutine.wav
Binary files differ
diff --git a/resources/img_class/labels/labels_mobilenet_v2_1.0_224.txt b/resources/img_class/labels/labels_mobilenet_v2_1.0_224.txt
new file mode 100644
index 0000000..0ce2451
--- /dev/null
+++ b/resources/img_class/labels/labels_mobilenet_v2_1.0_224.txt
@@ -0,0 +1,1001 @@
+background
+tench, Tinca tinca
+goldfish, Carassius auratus
+great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias
+tiger shark, Galeocerdo cuvieri
+hammerhead, hammerhead shark
+electric ray, crampfish, numbfish, torpedo
+stingray
+cock
+hen
+ostrich, Struthio camelus
+brambling, Fringilla montifringilla
+goldfinch, Carduelis carduelis
+house finch, linnet, Carpodacus mexicanus
+junco, snowbird
+indigo bunting, indigo finch, indigo bird, Passerina cyanea
+robin, American robin, Turdus migratorius
+bulbul
+jay
+magpie
+chickadee
+water ouzel, dipper
+kite
+bald eagle, American eagle, Haliaeetus leucocephalus
+vulture
+great grey owl, great gray owl, Strix nebulosa
+European fire salamander, Salamandra salamandra
+common newt, Triturus vulgaris
+eft
+spotted salamander, Ambystoma maculatum
+axolotl, mud puppy, Ambystoma mexicanum
+bullfrog, Rana catesbeiana
+tree frog, tree-frog
+tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui
+loggerhead, loggerhead turtle, Caretta caretta
+leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea
+mud turtle
+terrapin
+box turtle, box tortoise
+banded gecko
+common iguana, iguana, Iguana iguana
+American chameleon, anole, Anolis carolinensis
+whiptail, whiptail lizard
+agama
+frilled lizard, Chlamydosaurus kingi
+alligator lizard
+Gila monster, Heloderma suspectum
+green lizard, Lacerta viridis
+African chameleon, Chamaeleo chamaeleon
+Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis
+African crocodile, Nile crocodile, Crocodylus niloticus
+American alligator, Alligator mississipiensis
+triceratops
+thunder snake, worm snake, Carphophis amoenus
+ringneck snake, ring-necked snake, ring snake
+hognose snake, puff adder, sand viper
+green snake, grass snake
+king snake, kingsnake
+garter snake, grass snake
+water snake
+vine snake
+night snake, Hypsiglena torquata
+boa constrictor, Constrictor constrictor
+rock python, rock snake, Python sebae
+Indian cobra, Naja naja
+green mamba
+sea snake
+horned viper, cerastes, sand viper, horned asp, Cerastes cornutus
+diamondback, diamondback rattlesnake, Crotalus adamanteus
+sidewinder, horned rattlesnake, Crotalus cerastes
+trilobite
+harvestman, daddy longlegs, Phalangium opilio
+scorpion
+black and gold garden spider, Argiope aurantia
+barn spider, Araneus cavaticus
+garden spider, Aranea diademata
+black widow, Latrodectus mactans
+tarantula
+wolf spider, hunting spider
+tick
+centipede
+black grouse
+ptarmigan
+ruffed grouse, partridge, Bonasa umbellus
+prairie chicken, prairie grouse, prairie fowl
+peacock
+quail
+partridge
+African grey, African gray, Psittacus erithacus
+macaw
+sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita
+lorikeet
+coucal
+bee eater
+hornbill
+hummingbird
+jacamar
+toucan
+drake
+red-breasted merganser, Mergus serrator
+goose
+black swan, Cygnus atratus
+tusker
+echidna, spiny anteater, anteater
+platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus
+wallaby, brush kangaroo
+koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus
+wombat
+jellyfish
+sea anemone, anemone
+brain coral
+flatworm, platyhelminth
+nematode, nematode worm, roundworm
+conch
+snail
+slug
+sea slug, nudibranch
+chiton, coat-of-mail shell, sea cradle, polyplacophore
+chambered nautilus, pearly nautilus, nautilus
+Dungeness crab, Cancer magister
+rock crab, Cancer irroratus
+fiddler crab
+king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica
+American lobster, Northern lobster, Maine lobster, Homarus americanus
+spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish
+crayfish, crawfish, crawdad, crawdaddy
+hermit crab
+isopod
+white stork, Ciconia ciconia
+black stork, Ciconia nigra
+spoonbill
+flamingo
+little blue heron, Egretta caerulea
+American egret, great white heron, Egretta albus
+bittern
+crane
+limpkin, Aramus pictus
+European gallinule, Porphyrio porphyrio
+American coot, marsh hen, mud hen, water hen, Fulica americana
+bustard
+ruddy turnstone, Arenaria interpres
+red-backed sandpiper, dunlin, Erolia alpina
+redshank, Tringa totanus
+dowitcher
+oystercatcher, oyster catcher
+pelican
+king penguin, Aptenodytes patagonica
+albatross, mollymawk
+grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus
+killer whale, killer, orca, grampus, sea wolf, Orcinus orca
+dugong, Dugong dugon
+sea lion
+Chihuahua
+Japanese spaniel
+Maltese dog, Maltese terrier, Maltese
+Pekinese, Pekingese, Peke
+Shih-Tzu
+Blenheim spaniel
+papillon
+toy terrier
+Rhodesian ridgeback
+Afghan hound, Afghan
+basset, basset hound
+beagle
+bloodhound, sleuthhound
+bluetick
+black-and-tan coonhound
+Walker hound, Walker foxhound
+English foxhound
+redbone
+borzoi, Russian wolfhound
+Irish wolfhound
+Italian greyhound
+whippet
+Ibizan hound, Ibizan Podenco
+Norwegian elkhound, elkhound
+otterhound, otter hound
+Saluki, gazelle hound
+Scottish deerhound, deerhound
+Weimaraner
+Staffordshire bullterrier, Staffordshire bull terrier
+American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier
+Bedlington terrier
+Border terrier
+Kerry blue terrier
+Irish terrier
+Norfolk terrier
+Norwich terrier
+Yorkshire terrier
+wire-haired fox terrier
+Lakeland terrier
+Sealyham terrier, Sealyham
+Airedale, Airedale terrier
+cairn, cairn terrier
+Australian terrier
+Dandie Dinmont, Dandie Dinmont terrier
+Boston bull, Boston terrier
+miniature schnauzer
+giant schnauzer
+standard schnauzer
+Scotch terrier, Scottish terrier, Scottie
+Tibetan terrier, chrysanthemum dog
+silky terrier, Sydney silky
+soft-coated wheaten terrier
+West Highland white terrier
+Lhasa, Lhasa apso
+flat-coated retriever
+curly-coated retriever
+golden retriever
+Labrador retriever
+Chesapeake Bay retriever
+German short-haired pointer
+vizsla, Hungarian pointer
+English setter
+Irish setter, red setter
+Gordon setter
+Brittany spaniel
+clumber, clumber spaniel
+English springer, English springer spaniel
+Welsh springer spaniel
+cocker spaniel, English cocker spaniel, cocker
+Sussex spaniel
+Irish water spaniel
+kuvasz
+schipperke
+groenendael
+malinois
+briard
+kelpie
+komondor
+Old English sheepdog, bobtail
+Shetland sheepdog, Shetland sheep dog, Shetland
+collie
+Border collie
+Bouvier des Flandres, Bouviers des Flandres
+Rottweiler
+German shepherd, German shepherd dog, German police dog, alsatian
+Doberman, Doberman pinscher
+miniature pinscher
+Greater Swiss Mountain dog
+Bernese mountain dog
+Appenzeller
+EntleBucher
+boxer
+bull mastiff
+Tibetan mastiff
+French bulldog
+Great Dane
+Saint Bernard, St Bernard
+Eskimo dog, husky
+malamute, malemute, Alaskan malamute
+Siberian husky
+dalmatian, coach dog, carriage dog
+affenpinscher, monkey pinscher, monkey dog
+basenji
+pug, pug-dog
+Leonberg
+Newfoundland, Newfoundland dog
+Great Pyrenees
+Samoyed, Samoyede
+Pomeranian
+chow, chow chow
+keeshond
+Brabancon griffon
+Pembroke, Pembroke Welsh corgi
+Cardigan, Cardigan Welsh corgi
+toy poodle
+miniature poodle
+standard poodle
+Mexican hairless
+timber wolf, grey wolf, gray wolf, Canis lupus
+white wolf, Arctic wolf, Canis lupus tundrarum
+red wolf, maned wolf, Canis rufus, Canis niger
+coyote, prairie wolf, brush wolf, Canis latrans
+dingo, warrigal, warragal, Canis dingo
+dhole, Cuon alpinus
+African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus
+hyena, hyaena
+red fox, Vulpes vulpes
+kit fox, Vulpes macrotis
+Arctic fox, white fox, Alopex lagopus
+grey fox, gray fox, Urocyon cinereoargenteus
+tabby, tabby cat
+tiger cat
+Persian cat
+Siamese cat, Siamese
+Egyptian cat
+cougar, puma, catamount, mountain lion, painter, panther, Felis concolor
+lynx, catamount
+leopard, Panthera pardus
+snow leopard, ounce, Panthera uncia
+jaguar, panther, Panthera onca, Felis onca
+lion, king of beasts, Panthera leo
+tiger, Panthera tigris
+cheetah, chetah, Acinonyx jubatus
+brown bear, bruin, Ursus arctos
+American black bear, black bear, Ursus americanus, Euarctos americanus
+ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus
+sloth bear, Melursus ursinus, Ursus ursinus
+mongoose
+meerkat, mierkat
+tiger beetle
+ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle
+ground beetle, carabid beetle
+long-horned beetle, longicorn, longicorn beetle
+leaf beetle, chrysomelid
+dung beetle
+rhinoceros beetle
+weevil
+fly
+bee
+ant, emmet, pismire
+grasshopper, hopper
+cricket
+walking stick, walkingstick, stick insect
+cockroach, roach
+mantis, mantid
+cicada, cicala
+leafhopper
+lacewing, lacewing fly
+dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk
+damselfly
+admiral
+ringlet, ringlet butterfly
+monarch, monarch butterfly, milkweed butterfly, Danaus plexippus
+cabbage butterfly
+sulphur butterfly, sulfur butterfly
+lycaenid, lycaenid butterfly
+starfish, sea star
+sea urchin
+sea cucumber, holothurian
+wood rabbit, cottontail, cottontail rabbit
+hare
+Angora, Angora rabbit
+hamster
+porcupine, hedgehog
+fox squirrel, eastern fox squirrel, Sciurus niger
+marmot
+beaver
+guinea pig, Cavia cobaya
+sorrel
+zebra
+hog, pig, grunter, squealer, Sus scrofa
+wild boar, boar, Sus scrofa
+warthog
+hippopotamus, hippo, river horse, Hippopotamus amphibius
+ox
+water buffalo, water ox, Asiatic buffalo, Bubalus bubalis
+bison
+ram, tup
+bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis
+ibex, Capra ibex
+hartebeest
+impala, Aepyceros melampus
+gazelle
+Arabian camel, dromedary, Camelus dromedarius
+llama
+weasel
+mink
+polecat, fitch, foulmart, foumart, Mustela putorius
+black-footed ferret, ferret, Mustela nigripes
+otter
+skunk, polecat, wood pussy
+badger
+armadillo
+three-toed sloth, ai, Bradypus tridactylus
+orangutan, orang, orangutang, Pongo pygmaeus
+gorilla, Gorilla gorilla
+chimpanzee, chimp, Pan troglodytes
+gibbon, Hylobates lar
+siamang, Hylobates syndactylus, Symphalangus syndactylus
+guenon, guenon monkey
+patas, hussar monkey, Erythrocebus patas
+baboon
+macaque
+langur
+colobus, colobus monkey
+proboscis monkey, Nasalis larvatus
+marmoset
+capuchin, ringtail, Cebus capucinus
+howler monkey, howler
+titi, titi monkey
+spider monkey, Ateles geoffroyi
+squirrel monkey, Saimiri sciureus
+Madagascar cat, ring-tailed lemur, Lemur catta
+indri, indris, Indri indri, Indri brevicaudatus
+Indian elephant, Elephas maximus
+African elephant, Loxodonta africana
+lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens
+giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca
+barracouta, snoek
+eel
+coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch
+rock beauty, Holocanthus tricolor
+anemone fish
+sturgeon
+gar, garfish, garpike, billfish, Lepisosteus osseus
+lionfish
+puffer, pufferfish, blowfish, globefish
+abacus
+abaya
+academic gown, academic robe, judge's robe
+accordion, piano accordion, squeeze box
+acoustic guitar
+aircraft carrier, carrier, flattop, attack aircraft carrier
+airliner
+airship, dirigible
+altar
+ambulance
+amphibian, amphibious vehicle
+analog clock
+apiary, bee house
+apron
+ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin
+assault rifle, assault gun
+backpack, back pack, knapsack, packsack, rucksack, haversack
+bakery, bakeshop, bakehouse
+balance beam, beam
+balloon
+ballpoint, ballpoint pen, ballpen, Biro
+Band Aid
+banjo
+bannister, banister, balustrade, balusters, handrail
+barbell
+barber chair
+barbershop
+barn
+barometer
+barrel, cask
+barrow, garden cart, lawn cart, wheelbarrow
+baseball
+basketball
+bassinet
+bassoon
+bathing cap, swimming cap
+bath towel
+bathtub, bathing tub, bath, tub
+beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
+beacon, lighthouse, beacon light, pharos
+beaker
+bearskin, busby, shako
+beer bottle
+beer glass
+bell cote, bell cot
+bib
+bicycle-built-for-two, tandem bicycle, tandem
+bikini, two-piece
+binder, ring-binder
+binoculars, field glasses, opera glasses
+birdhouse
+boathouse
+bobsled, bobsleigh, bob
+bolo tie, bolo, bola tie, bola
+bonnet, poke bonnet
+bookcase
+bookshop, bookstore, bookstall
+bottlecap
+bow
+bow tie, bow-tie, bowtie
+brass, memorial tablet, plaque
+brassiere, bra, bandeau
+breakwater, groin, groyne, mole, bulwark, seawall, jetty
+breastplate, aegis, egis
+broom
+bucket, pail
+buckle
+bulletproof vest
+bullet train, bullet
+butcher shop, meat market
+cab, hack, taxi, taxicab
+caldron, cauldron
+candle, taper, wax light
+cannon
+canoe
+can opener, tin opener
+cardigan
+car mirror
+carousel, carrousel, merry-go-round, roundabout, whirligig
+carpenter's kit, tool kit
+carton
+car wheel
+cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM
+cassette
+cassette player
+castle
+catamaran
+CD player
+cello, violoncello
+cellular telephone, cellular phone, cellphone, cell, mobile phone
+chain
+chainlink fence
+chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour
+chain saw, chainsaw
+chest
+chiffonier, commode
+chime, bell, gong
+china cabinet, china closet
+Christmas stocking
+church, church building
+cinema, movie theater, movie theatre, movie house, picture palace
+cleaver, meat cleaver, chopper
+cliff dwelling
+cloak
+clog, geta, patten, sabot
+cocktail shaker
+coffee mug
+coffeepot
+coil, spiral, volute, whorl, helix
+combination lock
+computer keyboard, keypad
+confectionery, confectionary, candy store
+container ship, containership, container vessel
+convertible
+corkscrew, bottle screw
+cornet, horn, trumpet, trump
+cowboy boot
+cowboy hat, ten-gallon hat
+cradle
+crane
+crash helmet
+crate
+crib, cot
+Crock Pot
+croquet ball
+crutch
+cuirass
+dam, dike, dyke
+desk
+desktop computer
+dial telephone, dial phone
+diaper, nappy, napkin
+digital clock
+digital watch
+dining table, board
+dishrag, dishcloth
+dishwasher, dish washer, dishwashing machine
+disk brake, disc brake
+dock, dockage, docking facility
+dogsled, dog sled, dog sleigh
+dome
+doormat, welcome mat
+drilling platform, offshore rig
+drum, membranophone, tympan
+drumstick
+dumbbell
+Dutch oven
+electric fan, blower
+electric guitar
+electric locomotive
+entertainment center
+envelope
+espresso maker
+face powder
+feather boa, boa
+file, file cabinet, filing cabinet
+fireboat
+fire engine, fire truck
+fire screen, fireguard
+flagpole, flagstaff
+flute, transverse flute
+folding chair
+football helmet
+forklift
+fountain
+fountain pen
+four-poster
+freight car
+French horn, horn
+frying pan, frypan, skillet
+fur coat
+garbage truck, dustcart
+gasmask, respirator, gas helmet
+gas pump, gasoline pump, petrol pump, island dispenser
+goblet
+go-kart
+golf ball
+golfcart, golf cart
+gondola
+gong, tam-tam
+gown
+grand piano, grand
+greenhouse, nursery, glasshouse
+grille, radiator grille
+grocery store, grocery, food market, market
+guillotine
+hair slide
+hair spray
+half track
+hammer
+hamper
+hand blower, blow dryer, blow drier, hair dryer, hair drier
+hand-held computer, hand-held microcomputer
+handkerchief, hankie, hanky, hankey
+hard disc, hard disk, fixed disk
+harmonica, mouth organ, harp, mouth harp
+harp
+harvester, reaper
+hatchet
+holster
+home theater, home theatre
+honeycomb
+hook, claw
+hoopskirt, crinoline
+horizontal bar, high bar
+horse cart, horse-cart
+hourglass
+iPod
+iron, smoothing iron
+jack-o'-lantern
+jean, blue jean, denim
+jeep, landrover
+jersey, T-shirt, tee shirt
+jigsaw puzzle
+jinrikisha, ricksha, rickshaw
+joystick
+kimono
+knee pad
+knot
+lab coat, laboratory coat
+ladle
+lampshade, lamp shade
+laptop, laptop computer
+lawn mower, mower
+lens cap, lens cover
+letter opener, paper knife, paperknife
+library
+lifeboat
+lighter, light, igniter, ignitor
+limousine, limo
+liner, ocean liner
+lipstick, lip rouge
+Loafer
+lotion
+loudspeaker, speaker, speaker unit, loudspeaker system, speaker system
+loupe, jeweler's loupe
+lumbermill, sawmill
+magnetic compass
+mailbag, postbag
+mailbox, letter box
+maillot
+maillot, tank suit
+manhole cover
+maraca
+marimba, xylophone
+mask
+matchstick
+maypole
+maze, labyrinth
+measuring cup
+medicine chest, medicine cabinet
+megalith, megalithic structure
+microphone, mike
+microwave, microwave oven
+military uniform
+milk can
+minibus
+miniskirt, mini
+minivan
+missile
+mitten
+mixing bowl
+mobile home, manufactured home
+Model T
+modem
+monastery
+monitor
+moped
+mortar
+mortarboard
+mosque
+mosquito net
+motor scooter, scooter
+mountain bike, all-terrain bike, off-roader
+mountain tent
+mouse, computer mouse
+mousetrap
+moving van
+muzzle
+nail
+neck brace
+necklace
+nipple
+notebook, notebook computer
+obelisk
+oboe, hautboy, hautbois
+ocarina, sweet potato
+odometer, hodometer, mileometer, milometer
+oil filter
+organ, pipe organ
+oscilloscope, scope, cathode-ray oscilloscope, CRO
+overskirt
+oxcart
+oxygen mask
+packet
+paddle, boat paddle
+paddlewheel, paddle wheel
+padlock
+paintbrush
+pajama, pyjama, pj's, jammies
+palace
+panpipe, pandean pipe, syrinx
+paper towel
+parachute, chute
+parallel bars, bars
+park bench
+parking meter
+passenger car, coach, carriage
+patio, terrace
+pay-phone, pay-station
+pedestal, plinth, footstall
+pencil box, pencil case
+pencil sharpener
+perfume, essence
+Petri dish
+photocopier
+pick, plectrum, plectron
+pickelhaube
+picket fence, paling
+pickup, pickup truck
+pier
+piggy bank, penny bank
+pill bottle
+pillow
+ping-pong ball
+pinwheel
+pirate, pirate ship
+pitcher, ewer
+plane, carpenter's plane, woodworking plane
+planetarium
+plastic bag
+plate rack
+plow, plough
+plunger, plumber's helper
+Polaroid camera, Polaroid Land camera
+pole
+police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria
+poncho
+pool table, billiard table, snooker table
+pop bottle, soda bottle
+pot, flowerpot
+potter's wheel
+power drill
+prayer rug, prayer mat
+printer
+prison, prison house
+projectile, missile
+projector
+puck, hockey puck
+punching bag, punch bag, punching ball, punchball
+purse
+quill, quill pen
+quilt, comforter, comfort, puff
+racer, race car, racing car
+racket, racquet
+radiator
+radio, wireless
+radio telescope, radio reflector
+rain barrel
+recreational vehicle, RV, R.V.
+reel
+reflex camera
+refrigerator, icebox
+remote control, remote
+restaurant, eating house, eating place, eatery
+revolver, six-gun, six-shooter
+rifle
+rocking chair, rocker
+rotisserie
+rubber eraser, rubber, pencil eraser
+rugby ball
+rule, ruler
+running shoe
+safe
+safety pin
+saltshaker, salt shaker
+sandal
+sarong
+sax, saxophone
+scabbard
+scale, weighing machine
+school bus
+schooner
+scoreboard
+screen, CRT screen
+screw
+screwdriver
+seat belt, seatbelt
+sewing machine
+shield, buckler
+shoe shop, shoe-shop, shoe store
+shoji
+shopping basket
+shopping cart
+shovel
+shower cap
+shower curtain
+ski
+ski mask
+sleeping bag
+slide rule, slipstick
+sliding door
+slot, one-armed bandit
+snorkel
+snowmobile
+snowplow, snowplough
+soap dispenser
+soccer ball
+sock
+solar dish, solar collector, solar furnace
+sombrero
+soup bowl
+space bar
+space heater
+space shuttle
+spatula
+speedboat
+spider web, spider's web
+spindle
+sports car, sport car
+spotlight, spot
+stage
+steam locomotive
+steel arch bridge
+steel drum
+stethoscope
+stole
+stone wall
+stopwatch, stop watch
+stove
+strainer
+streetcar, tram, tramcar, trolley, trolley car
+stretcher
+studio couch, day bed
+stupa, tope
+submarine, pigboat, sub, U-boat
+suit, suit of clothes
+sundial
+sunglass
+sunglasses, dark glasses, shades
+sunscreen, sunblock, sun blocker
+suspension bridge
+swab, swob, mop
+sweatshirt
+swimming trunks, bathing trunks
+swing
+switch, electric switch, electrical switch
+syringe
+table lamp
+tank, army tank, armored combat vehicle, armoured combat vehicle
+tape player
+teapot
+teddy, teddy bear
+television, television system
+tennis ball
+thatch, thatched roof
+theater curtain, theatre curtain
+thimble
+thresher, thrasher, threshing machine
+throne
+tile roof
+toaster
+tobacco shop, tobacconist shop, tobacconist
+toilet seat
+torch
+totem pole
+tow truck, tow car, wrecker
+toyshop
+tractor
+trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi
+tray
+trench coat
+tricycle, trike, velocipede
+trimaran
+tripod
+triumphal arch
+trolleybus, trolley coach, trackless trolley
+trombone
+tub, vat
+turnstile
+typewriter keyboard
+umbrella
+unicycle, monocycle
+upright, upright piano
+vacuum, vacuum cleaner
+vase
+vault
+velvet
+vending machine
+vestment
+viaduct
+violin, fiddle
+volleyball
+waffle iron
+wall clock
+wallet, billfold, notecase, pocketbook
+wardrobe, closet, press
+warplane, military plane
+washbasin, handbasin, washbowl, lavabo, wash-hand basin
+washer, automatic washer, washing machine
+water bottle
+water jug
+water tower
+whiskey jug
+whistle
+wig
+window screen
+window shade
+Windsor tie
+wine bottle
+wing
+wok
+wooden spoon
+wool, woolen, woollen
+worm fence, snake fence, snake-rail fence, Virginia fence
+wreck
+yawl
+yurt
+web site, website, internet site, site
+comic book
+crossword puzzle, crossword
+street sign
+traffic light, traffic signal, stoplight
+book jacket, dust cover, dust jacket, dust wrapper
+menu
+plate
+guacamole
+consomme
+hot pot, hotpot
+trifle
+ice cream, icecream
+ice lolly, lolly, lollipop, popsicle
+French loaf
+bagel, beigel
+pretzel
+cheeseburger
+hotdog, hot dog, red hot
+mashed potato
+head cabbage
+broccoli
+cauliflower
+zucchini, courgette
+spaghetti squash
+acorn squash
+butternut squash
+cucumber, cuke
+artichoke, globe artichoke
+bell pepper
+cardoon
+mushroom
+Granny Smith
+strawberry
+orange
+lemon
+fig
+pineapple, ananas
+banana
+jackfruit, jak, jack
+custard apple
+pomegranate
+hay
+carbonara
+chocolate sauce, chocolate syrup
+dough
+meat loaf, meatloaf
+pizza, pizza pie
+potpie
+burrito
+red wine
+espresso
+cup
+eggnog
+alp
+bubble
+cliff, drop, drop-off
+coral reef
+geyser
+lakeside, lakeshore
+promontory, headland, head, foreland
+sandbar, sand bar
+seashore, coast, seacoast, sea-coast
+valley, vale
+volcano
+ballplayer, baseball player
+groom, bridegroom
+scuba diver
+rapeseed
+daisy
+yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum
+corn
+acorn
+hip, rose hip, rosehip
+buckeye, horse chestnut, conker
+coral fungus
+agaric
+gyromitra
+stinkhorn, carrion fungus
+earthstar
+hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa
+bolete
+ear, spike, capitulum
+toilet tissue, toilet paper, bathroom tissue
\ No newline at end of file
diff --git a/resources/img_class/samples/cat.bmp b/resources/img_class/samples/cat.bmp
new file mode 100644
index 0000000..b1f3c69
--- /dev/null
+++ b/resources/img_class/samples/cat.bmp
Binary files differ
diff --git a/resources/img_class/samples/dog.bmp b/resources/img_class/samples/dog.bmp
new file mode 100644
index 0000000..180ba3c
--- /dev/null
+++ b/resources/img_class/samples/dog.bmp
Binary files differ
diff --git a/resources/img_class/samples/files.md b/resources/img_class/samples/files.md
new file mode 100644
index 0000000..c031ecd
--- /dev/null
+++ b/resources/img_class/samples/files.md
@@ -0,0 +1,12 @@
+# Sample images
+
+The sample images provided are under a Creative Commons License. The source links are documented here for traceability:
+
+- [kimono.bmp](https://www.pexels.com/photo/three-geisha-walking-between-buildings-1325837/)
+- [tiger.bmp](https://www.pexels.com/photo/tiger-in-green-grass-near-the-tree-during-daytime-162173/)
+- [cat.bmp](https://www.pexels.com/photo/cat-whiskers-kitty-tabby-20787/)
+- [dog.bmp](https://www.pexels.com/photo/adult-black-pug-1851164/)
+
+## License
+
+[Creative Commons Attribution 1.0 Generic](../../LICENSE_CC_1.0.txt).
diff --git a/resources/img_class/samples/kimono.bmp b/resources/img_class/samples/kimono.bmp
new file mode 100644
index 0000000..c1274a5
--- /dev/null
+++ b/resources/img_class/samples/kimono.bmp
Binary files differ
diff --git a/resources/img_class/samples/tiger.bmp b/resources/img_class/samples/tiger.bmp
new file mode 100644
index 0000000..b53b0c0
--- /dev/null
+++ b/resources/img_class/samples/tiger.bmp
Binary files differ
diff --git a/resources/kws/labels/ds_cnn_labels.txt b/resources/kws/labels/ds_cnn_labels.txt
new file mode 100644
index 0000000..ba41645
--- /dev/null
+++ b/resources/kws/labels/ds_cnn_labels.txt
@@ -0,0 +1,12 @@
+_silence_
+_unknown_
+yes
+no
+up
+down
+left
+right
+on
+off
+stop
+go
\ No newline at end of file
diff --git a/resources/kws/samples/down.wav b/resources/kws/samples/down.wav
new file mode 100644
index 0000000..7c77f63
--- /dev/null
+++ b/resources/kws/samples/down.wav
Binary files differ
diff --git a/resources/kws/samples/files.md b/resources/kws/samples/files.md
new file mode 100644
index 0000000..29d42ae
--- /dev/null
+++ b/resources/kws/samples/files.md
@@ -0,0 +1,50 @@
+# Sample wav audio clip
+
+The sample wav audio clips provided are under a Creative Commons License (Creative Commons Attribution 4.0 International Public License).
+The source is http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz; the specific files used are listed here for traceability:
+
+- down.wav
+
+    ```tree
+    speech_commands_v0.02
+    └── down
+       └── 0a9f9af7_nohash_2.wav
+    ```
+
+- rightleftup.wav
+
+    ```tree
+    speech_commands_v0.02
+    ├── left
+    │   └── 0d82fd99_nohash_3.wav
+    ├── right
+    │   └── 0d82fd99_nohash_0.wav
+    └── up
+       └── 0a2b400e_nohash_1.wav
+    ```
+
+- yes.wav
+
+    ```tree
+    speech_commands_v0.02
+    └── yes
+       └── 0b40aa8e_nohash_0.wav
+    ```
+
+- yesnogostop.wav
+
+    ```tree
+    speech_commands_v0.02
+    ├── go
+    │   └── 0c2ca723_nohash_2.wav
+    ├── no
+    │   └── 0a2b400e_nohash_0.wav
+    ├── stop
+    │   └── 0a196374_nohash_0.wav
+    └── yes
+       └── 0b40aa8e_nohash_0.wav
+    ```
+
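+The multi-word clips above (rightleftup.wav and yesnogostop.wav) each list several single-word source recordings, which suggests they were stitched together. The sketch below shows one way such a clip could be assembled with Python's built-in `wave` module; it assumes all source clips share the same audio parameters and is not necessarily the exact process used to create the provided files.
+
+```python
+import wave
+
+def concatenate_wavs(input_paths, output_path):
+    """Join several wav clips into one, assuming identical audio parameters."""
+    frames, params = [], None
+    for path in input_paths:
+        with wave.open(path, "rb") as clip:
+            if params is None:
+                params = clip.getparams()  # take rate/width/channels from the first clip
+            frames.append(clip.readframes(clip.getnframes()))
+    with wave.open(output_path, "wb") as out:
+        out.setparams(params)
+        for chunk in frames:
+            out.writeframes(chunk)
+
+# Order assumed from the clip's name (right, left, up):
+concatenate_wavs(
+    ["0d82fd99_nohash_0.wav",   # right
+     "0d82fd99_nohash_3.wav",   # left
+     "0a2b400e_nohash_1.wav"],  # up
+    "rightleftup.wav",
+)
+```
+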
+## License
+
+[Creative Commons Attribution 4.0 International Public License](../../LICENSE_CC_4.0.txt).
diff --git a/resources/kws/samples/rightleftup.wav b/resources/kws/samples/rightleftup.wav
new file mode 100644
index 0000000..47551e8
--- /dev/null
+++ b/resources/kws/samples/rightleftup.wav
Binary files differ
diff --git a/resources/kws/samples/yes.wav b/resources/kws/samples/yes.wav
new file mode 100644
index 0000000..f3489db
--- /dev/null
+++ b/resources/kws/samples/yes.wav
Binary files differ
diff --git a/resources/kws/samples/yesnogostop.wav b/resources/kws/samples/yesnogostop.wav
new file mode 100644
index 0000000..2a2c0ac
--- /dev/null
+++ b/resources/kws/samples/yesnogostop.wav
Binary files differ
diff --git a/resources/kws_asr/labels/ds_cnn_labels.txt b/resources/kws_asr/labels/ds_cnn_labels.txt
new file mode 100644
index 0000000..ba41645
--- /dev/null
+++ b/resources/kws_asr/labels/ds_cnn_labels.txt
@@ -0,0 +1,12 @@
+_silence_
+_unknown_
+yes
+no
+up
+down
+left
+right
+on
+off
+stop
+go
\ No newline at end of file
diff --git a/resources/kws_asr/labels/labels_wav2letter.txt b/resources/kws_asr/labels/labels_wav2letter.txt
new file mode 100644
index 0000000..8fb2fc8
--- /dev/null
+++ b/resources/kws_asr/labels/labels_wav2letter.txt
@@ -0,0 +1,29 @@
+a
+b
+c
+d
+e
+f
+g
+h
+i
+j
+k
+l
+m
+n
+o
+p
+q
+r
+s
+t
+u
+v
+w
+x
+y
+z
+'
+ 
+$
\ No newline at end of file
diff --git a/resources/kws_asr/samples/files.md b/resources/kws_asr/samples/files.md
new file mode 100644
index 0000000..6db8d65
--- /dev/null
+++ b/resources/kws_asr/samples/files.md
@@ -0,0 +1,22 @@
+# Sample wav audio clip
+
+The sample wav audio clips provided are under a Creative Commons License (Creative Commons Attribution 4.0 International Public License).
+The source is http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz; the specific files used are listed here for traceability:
+
+- yesnogostop.wav
+
+    ```tree
+    speech_commands_v0.02
+    ├── go
+    │   └── 0c2ca723_nohash_2.wav
+    ├── no
+    │   └── 0a2b400e_nohash_0.wav
+    ├── stop
+    │   └── 0a196374_nohash_0.wav
+    └── yes
+       └── 0b40aa8e_nohash_0.wav
+    ```
+
+## License
+
+[Creative Commons Attribution 4.0 International Public License](../../LICENSE_CC_4.0.txt).
diff --git a/resources/kws_asr/samples/yesnogostop.wav b/resources/kws_asr/samples/yesnogostop.wav
new file mode 100644
index 0000000..2a2c0ac
--- /dev/null
+++ b/resources/kws_asr/samples/yesnogostop.wav
Binary files differ
diff --git a/scripts/cmake/bare-metal-sources.cmake b/scripts/cmake/bare-metal-sources.cmake
new file mode 100644
index 0000000..3e24d7b
--- /dev/null
+++ b/scripts/cmake/bare-metal-sources.cmake
@@ -0,0 +1,170 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build_baremetal)
+set(PLAT_HAL ${CMAKE_CURRENT_SOURCE_DIR}/source/application/hal/platforms/bare-metal)
+
+# If the target platform is not defined, raise an error.
+# TARGET_PLATFORM should either have been defined by the user or set to the default value (mps3).
+if (NOT DEFINED TARGET_PLATFORM)
+    message(FATAL_ERROR "Invalid target platform, specify TARGET_PLATFORM=mps3")
+endif ()
+message(STATUS "target platform ${TARGET_PLATFORM}")
+
+set(SOURCE_GEN_DIR          ${CMAKE_BINARY_DIR}/generated/bsp)
+if (NOT DEFINED MEM_PROFILES_SRC_DIR)
+    set(MEM_PROFILES_SRC_DIR    ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/subsystem-profiles)
+endif()
+set(MEM_PROFILE_TEMPLATE    ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/templates/peripheral_memmap.h.template)
+set(IRQ_PROFILE_TEMPLATE    ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/templates/peripheral_irqs.h.template)
+set(MEM_REGIONS_TEMPLATE    ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/templates/mem_regions.h.template)
+set(TA_SETTINGS_TEMPLATE    ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/templates/timing_adapter_settings.template)
+set(TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME  "libtensorflow-microlite.a")
+set(TENSORFLOW_LITE_MICRO_FLAG               "-DTF_LITE_STATIC_MEMORY")
+set(ETHOS_U55_FLAG          "-DARM_NPU=1")
+
+if (ETHOS_U55_ENABLED)
+    set(OPTIONAL_FLAGS      "${OPTIONAL_FLAGS} ${ETHOS_U55_FLAG}")
+endif ()
+
+# Set specific flags depending on target platform and subsystem
+if (TARGET_PLATFORM STREQUAL mps3)
+    set(MPS3_PLATFORM_FLAG          "-DMPS3_PLATFORM=1")
+
+    # If the target platform is mps3 and the subsystem is not defined, raise an error.
+    # TARGET_SUBSYSTEM should either have been defined by the user or set to a default value.
+    if (NOT DEFINED TARGET_SUBSYSTEM)
+        message(FATAL_ERROR "Target subsystem for mps3 undefined, "
+                            "specify -DTARGET_SUBSYSTEM=<sse-200 or sse-300>")
+    endif ()
+
+    if (TARGET_SUBSYSTEM STREQUAL sse-200 OR TARGET_SUBSYSTEM STREQUAL sse-300)
+        message(STATUS          "target subsystem is ${TARGET_SUBSYSTEM}")
+        set(BSP_PACKAGE_DIR     "${PLAT_HAL}/bsp/bsp-packs/mps3")
+        set(SCAT_FILE           "${PLAT_HAL}/bsp/mem_layout/mps3-${TARGET_SUBSYSTEM}.sct")
+
+        # Include the mem profile definitions specific to our target subsystem
+        include(${MEM_PROFILES_SRC_DIR}/corstone-${TARGET_SUBSYSTEM}.cmake)
+        set(OPTIONAL_FLAGS      "${OPTIONAL_FLAGS} ${MPS3_PLATFORM_FLAG}")
+    else ()
+        message(FATAL_ERROR "Non compatible target subsystem: ${TARGET_SUBSYSTEM}")
+    endif ()
+elseif (TARGET_PLATFORM STREQUAL simple_platform)
+    set(BSP_PACKAGE_DIR     "${PLAT_HAL}/bsp/bsp-packs/${TARGET_PLATFORM}")
+    set(SCAT_FILE           "${PLAT_HAL}/bsp/mem_layout/${TARGET_PLATFORM}.sct")
+    include(${MEM_PROFILES_SRC_DIR}/${TARGET_PLATFORM}.cmake)
+    set(OPTIONAL_FLAGS      "${OPTIONAL_FLAGS}")
+else ()
+    message(FATAL_ERROR "Non compatible target platform ${TARGET_PLATFORM}")
+endif ()
+
+if (ETHOS_U55_ENABLED)
+    USER_OPTION(TA_CONFIG_FILE "Path to the timing adapter configuration file"
+            "${CMAKE_SCRIPTS_DIR}/ta_config.cmake"
+            FILEPATH)
+
+    # must be included after target subsystem CMake file
+    include(${TA_CONFIG_FILE})
+endif()
+
+# Generate the memory map header file from the mem profile cmake included in one of
+# the previous sections
+message(STATUS "Configuring file from ${MEM_PROFILE_TEMPLATE}"
+                                   ", ${IRQ_PROFILE_TEMPLATE}"
+                                " and ${MEM_REGIONS_TEMPLATE}")
+
+configure_file("${MEM_PROFILE_TEMPLATE}" "${SOURCE_GEN_DIR}/peripheral_memmap.h")
+configure_file("${IRQ_PROFILE_TEMPLATE}" "${SOURCE_GEN_DIR}/peripheral_irqs.h")
+configure_file("${MEM_REGIONS_TEMPLATE}" "${SOURCE_GEN_DIR}/mem_regions.h")
+configure_file("${TA_SETTINGS_TEMPLATE}" "${SOURCE_GEN_DIR}/timing_adapter_settings.h")
+
+message(STATUS "Scatter file: ${SCAT_FILE}")
+message(STATUS "Using BSP package from: ${BSP_PACKAGE_DIR}")
+
+if (DEFINED VERIFY_TEST_OUTPUT)
+    message(STATUS "Test output verification flag is: ${VERIFY_TEST_OUTPUT}")
+    set(OPTIONAL_FLAGS "${OPTIONAL_FLAGS} -DVERIFY_TEST_OUTPUT=${VERIFY_TEST_OUTPUT}")
+endif ()
+
+if (DEFINED LOG_LEVEL)
+    message(STATUS "Setting log level to ${LOG_LEVEL}")
+    set(OPTIONAL_FLAGS "${OPTIONAL_FLAGS} -DLOG_LEVEL=${LOG_LEVEL}")
+endif()
+
+if (DEFINED ACTIVATION_BUF_SRAM_SZ)
+    message(STATUS "Maximum SRAM space for activations buffers for this system: ${ACTIVATION_BUF_SRAM_SZ}")
+    set(OPTIONAL_FLAGS "${OPTIONAL_FLAGS} -DACTIVATION_BUF_SRAM_SZ=${ACTIVATION_BUF_SRAM_SZ}")
+endif()
+
+if (DEFINED ARMCLANG_DEBUG_DWARF_LEVEL)
+    message(STATUS "setting dwarf conformance level to gdwarf-${ARMCLANG_DEBUG_DWARF_LEVEL}")
+    set(OPTIONAL_FLAGS "${OPTIONAL_FLAGS} -gdwarf-${ARMCLANG_DEBUG_DWARF_LEVEL}")
+endif()
+
+set(COMPILER_FLAGS              "${ALL_COMMON_FLAGS} ${TENSORFLOW_LITE_MICRO_FLAG} ${PROFILING_OPT} ${OPTIONAL_FLAGS}")
+# CMake does not pass the C++ standard flag on its own here, so add it manually
+set(CMAKE_CXX_FLAGS             "${COMPILER_FLAGS} -std=c++11" CACHE INTERNAL "")
+set(CMAKE_C_FLAGS               "${COMPILER_FLAGS}" CACHE INTERNAL "")
+set(CMAKE_ASM_FLAGS             "${CPU_LD}")
+set(CMAKE_ASM_COMPILE_OBJECT    ${CMAKE_CXX_FLAGS})
+
+add_link_options(--strict --callgraph --load_addr_map_info --map)
+add_link_options(--symbols --xref --scatter=${SCAT_FILE})
+
+# Warnings to be ignored:
+# L6314W = No section matches pattern
+# L6439W = Multiply defined Global Symbol
+add_link_options(--diag_suppress=L6439W,L6314W)
+add_link_options(--info sizes,totals,unused,veneers --entry Reset_Handler)
+
+if (CMAKE_BUILD_TYPE STREQUAL Release)
+    add_link_options(--no_debug)
+endif ()
+
+set(CMAKE_EXE_LINKER_FLAGS "${CPU_LD}")
+
+set(PLAT_BSP_INCLUDES
+    ${PLAT_HAL}/bsp/cmsis-device/include
+    ${PLAT_HAL}/bsp/include/
+    ${PLAT_HAL}/bsp/bsp-core/include
+    ${BSP_PACKAGE_DIR}/include
+)
+
+# Include directories:
+set(PLAT_INCLUDE_DIRS
+    ${PLAT_BSP_INCLUDES}
+    ${PLAT_HAL}/utils/include
+    ${PLAT_HAL}/images/include
+    ${PLAT_HAL}/data_presentation/lcd/include
+    ${PLAT_HAL}/timer/include
+    ${SOURCE_GEN_DIR}
+    )
+
+# Source files
+file(GLOB_RECURSE SRC_PLAT_HAL
+
+    # Higher level HAL sources - software logic implementations
+    "${PLAT_HAL}/data_*/*.c"
+    "${PLAT_HAL}/images/*.c"
+    "${PLAT_HAL}/timer/*.c"
+    "${PLAT_HAL}/utils/*.c"
+
+    # Low level HAL sources - these enable interaction with
+    # the actual hardware
+    "${PLAT_HAL}/bsp/cmsis-device/*.c"
+    "${PLAT_HAL}/bsp/bsp-core/*.c"
+    "${BSP_PACKAGE_DIR}/*.c"
+    )
diff --git a/scripts/cmake/bare-metal-toolchain.cmake b/scripts/cmake/bare-metal-toolchain.cmake
new file mode 100644
index 0000000..5d91b98
--- /dev/null
+++ b/scripts/cmake/bare-metal-toolchain.cmake
@@ -0,0 +1,65 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+# specify the cross compiler
+set(CMAKE_C_COMPILER                armclang)
+set(CMAKE_CXX_COMPILER              armclang)
+set(CMAKE_C_LINKER_PREFERENCE       armlink)
+set(CMAKE_ASM_LINKER_PREFERENCE     armlink)
+set(CMAKE_ASM_COMPILER              armasm)
+set(CMAKE_ASM_COMPILER_AR           armar)
+
+set(CMAKE_CROSSCOMPILING            true)
+set(CMAKE_SYSTEM_NAME               Generic)
+
+set(MIN_ARM_CLANG_VERSION           6.14)
+
+if (NOT DEFINED CMAKE_SYSTEM_PROCESSOR)
+    set(CMAKE_SYSTEM_PROCESSOR      cortex-m55)
+endif()
+
+# Skip compiler test execution
+set(CMAKE_C_COMPILER_WORKS          1)
+set(CMAKE_CXX_COMPILER_WORKS        1)
+
+set(PLATFORM_HAL                    1)
+
+set(WARNING_OPTS                    "-Wall -Wextra -Wvla")
+set(SPECIAL_OPTS                    "-fno-rtti -funsigned-char -fno-function-sections -fno-exceptions")
+set(PLATFORM_FLAGS                  "-mthumb --target=arm-arm-non-eabi -mlittle-endian -DPLATFORM_HAL=${PLATFORM_HAL}")
+
+set(CMAKE_C_FLAGS_DEBUG             "-DDEBUG -O0")
+set(CMAKE_C_FLAGS_RELEASE           "-DNDEBUG -O3")
+
+set(CMAKE_CXX_FLAGS_DEBUG           "-DDEBUG -O0")
+set(CMAKE_CXX_FLAGS_RELEASE         "-DNDEBUG -O3")
+
+if (CMAKE_SYSTEM_PROCESSOR STREQUAL cortex-m55)
+    # Flags for cortex-m55
+    set(CPU_CORTEX_M55              1)
+    set(CPU_CC                      "-mcpu=cortex-m55 -mfloat-abi=hard -MD -DCPU_CORTEX_M55=1 -DARM_MATH_DSP -DARM_MATH_LOOPUNROLL -D__FPU_USED=1")
+    set(CPU_LD                      "--cpu=8.1-M.Main.dsp")
+elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL cortex-m33)
+    # Flags for cortex-m33 to go here
+endif()
+
+set(ALL_COMMON_FLAGS                "${CPU_CC} ${WARNING_OPTS} ${SPECIAL_OPTS} ${PLATFORM_FLAGS}")
+
+function(enforce_compiler_version)
+    if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${MIN_ARM_CLANG_VERSION})
+        message( FATAL_ERROR "Arm compiler version must be ${MIN_ARM_CLANG_VERSION} or greater to support ${CMAKE_SYSTEM_PROCESSOR} architecture." )
+    endif()
+endfunction()
diff --git a/scripts/cmake/cmsis-dsp.cmake b/scripts/cmake/cmsis-dsp.cmake
new file mode 100644
index 0000000..cb0243b
--- /dev/null
+++ b/scripts/cmake/cmsis-dsp.cmake
@@ -0,0 +1,74 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# CMSIS-DSP library CMake helper script.
+
+# 1. We should be cross-compiling (non-native target)
+if (TARGET_PLATFORM STREQUAL native)
+    message(FATAL_ERROR "No CMSIS-DSP support for native target.")
+endif()
+
+# 2. Check if CMSIS sources have been defined
+if (NOT DEFINED CMSIS_SRC_PATH)
+    message(FATAL_ERROR "CMSIS path should be defined for CMSIS-DSP library to be built")
+endif()
+
+# 3. Form a list of all the sources we need in the CMSIS-DSP library
+set(CMSIS_DSP_PATH_SUFFIX   "CMSIS/DSP")
+set(CMSIS_CORE_PATH_SUFFIX  "CMSIS/Core")
+set(CMSIS_DSP_SRC_DIR       "${CMSIS_SRC_PATH}/${CMSIS_DSP_PATH_SUFFIX}/Source")
+set(CMSIS_DSP_INC_DIR       "${CMSIS_SRC_PATH}/${CMSIS_DSP_PATH_SUFFIX}/Include")
+set(CMSIS_DSP_PRI_INC_DIR   "${CMSIS_SRC_PATH}/${CMSIS_DSP_PATH_SUFFIX}/PrivateInclude")
+set(CMSIS_CORE_INC_DIR      "${CMSIS_SRC_PATH}/${CMSIS_CORE_PATH_SUFFIX}/Include")
+
+file(GLOB_RECURSE
+    CMSIS_DSP_SRC
+    "${CMSIS_DSP_SRC_DIR}/arm_*.c")
+
+# 4. Create static library
+set(CMSIS_DSP_TARGET        cmsis-dsp)
+
+add_library(${CMSIS_DSP_TARGET} STATIC ${CMSIS_DSP_SRC})
+
+target_include_directories(${CMSIS_DSP_TARGET} PUBLIC
+                           ${CMSIS_DSP_INC_DIR}
+                           ${CMSIS_CORE_INC_DIR})
+target_include_directories(${CMSIS_DSP_TARGET} PRIVATE
+                           ${CMSIS_DSP_PRI_INC_DIR})
+
+# 5. Add any custom/conditional flags for compilation or linkage
+if (${CMAKE_SYSTEM_PROCESSOR} STREQUAL cortex-m55)
+    target_compile_definitions(${CMSIS_DSP_TARGET} PUBLIC
+        ARM_MATH_MVEI
+        ARM_MATH_DSP
+        ARM_MATH_LOOPUNROLL)
+elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL cortex-m33)
+    # Placeholder, if building with Cortex-M33
+endif()
+
+
+# 6. Provide the library path for the top level CMake to use:
+set(CMSIS_DSP_LIB   "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/lib${CMSIS_DSP_TARGET}.a")
+message(STATUS "CMSIS_DSP_LIB set to be generated here: ${CMSIS_DSP_LIB}")
+
+message(STATUS "CMAKE_CURRENT_SOURCE_DIR: " ${CMAKE_CURRENT_SOURCE_DIR})
+message(STATUS "*******************************************************")
+message(STATUS "Library                                : " ${CMSIS_DSP_TARGET})
+message(STATUS "Build type                             : " ${CMAKE_BUILD_TYPE})
+message(STATUS "TARGET_PLATFORM                        : " ${TARGET_PLATFORM})
+message(STATUS "CMAKE_SYSTEM_PROCESSOR                 : " ${CMAKE_SYSTEM_PROCESSOR})
+message(STATUS "*******************************************************")
diff --git a/scripts/cmake/native-sources.cmake b/scripts/cmake/native-sources.cmake
new file mode 100644
index 0000000..743e075
--- /dev/null
+++ b/scripts/cmake/native-sources.cmake
@@ -0,0 +1,58 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+# Set the install prefix
+set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build_native)
+set(PLAT_HAL ${CMAKE_CURRENT_SOURCE_DIR}/source/application/hal/platforms/native)
+
+if (ETHOS_U55_ENABLED)
+    message(WARNING "EthosU can't be enabled for native builds."
+                    "Use -DETHOS_U55_ENABLED=0 flag for this target platform."
+                    "Overriding, disabling use of EthosU...")
+    set(ETHOS_U55_ENABLED OFF)
+endif()
+
+if (DEFINED LOG_LEVEL)
+    message(STATUS "Setting log level to ${LOG_LEVEL}")
+    set (LOG_FLAG "-DLOG_LEVEL=${LOG_LEVEL}")
+endif()
+
+set(TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME  "libtensorflow-microlite.a")
+set(TENSORFLOW_LITE_MICRO_FLAGS "-DTF_LITE_STATIC_MEMORY -DACTIVATION_BUF_SRAM_SZ=0")
+
+set(CMAKE_C_FLAGS
+        "${WARNING_FLAGS} ${SPECIAL_OPTS} ${PLATFORM_FLAGS}\
+        ${PROFILING_OPT} ${TF_FLAG} ${LOG_FLAG} ${TENSORFLOW_LITE_MICRO_FLAGS}"
+        CACHE INTERNAL "")
+set(CMAKE_CXX_FLAGS
+        "${WARNING_FLAGS} ${SPECIAL_OPTS} ${SPECIAL_OPTS_CXX}\
+        ${PLATFORM_FLAGS} ${PROFILING_OPT} ${TF_FLAG} ${LOG_FLAG}\
+        ${TENSORFLOW_LITE_MICRO_FLAGS}"
+        CACHE INTERNAL "")
+
+# Include directories:
+set(PLAT_INCLUDE_DIRS
+    ${PLAT_HAL}/utils/include
+    ${PLAT_HAL}/images/include
+    ${PLAT_HAL}/data_presentation/log/include
+    ${PLAT_HAL}/timer/include
+    )
+
+# Source files
+file(GLOB_RECURSE SRC_PLAT_HAL
+    "${PLAT_HAL}/**/*.c"
+    "${PLAT_HAL}/**/*.cc"
+    )
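+
+# Illustrative configure step for this target platform (the log level value is
+# only an example; any LOG_LEVEL supported by the logging HAL can be passed):
+#
+#     cmake -DTARGET_PLATFORM=native -DLOG_LEVEL=LOG_LEVEL_INFO ..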
diff --git a/scripts/cmake/native-toolchain.cmake b/scripts/cmake/native-toolchain.cmake
new file mode 100644
index 0000000..2e28cd4
--- /dev/null
+++ b/scripts/cmake/native-toolchain.cmake
@@ -0,0 +1,40 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+set(CMAKE_CXX_COMPILER          g++)
+set(CMAKE_C_COMPILER            gcc)
+set(CMAKE_C_LINKER_PREFERENCE   gcc)
+set(CMAKE_CXX_LINKER_PREFERENCE gcc)
+
+set(CMAKE_C_FLAGS_DEBUG         "-DDEBUG -O0 -g")
+set(CMAKE_C_FLAGS_RELEASE       "-DNDEBUG -O3")
+
+set(CMAKE_CXX_FLAGS_DEBUG       "-DDEBUG -O0 -g")
+set(CMAKE_CXX_FLAGS_RELEASE     "-DNDEBUG -O3")
+
+# Platform HAL identifier (passed to the compiler via PLATFORM_FLAGS below):
+set(PLATFORM_HAL                3)
+set(WARNING_FLAGS               "-Wsign-compare -Wshadow         \
+                                 -Wextra -Wall -Wunused-function \
+                                 -Wmissing-field-initializers    \
+                                 -Wswitch -Wvla -Wunused-parameter")
+set(SPECIAL_OPTS                "-fPIC -pthread")
+set(PLATFORM_FLAGS              "-DPLATFORM_HAL=${PLATFORM_HAL}")
+set(SPECIAL_OPTS_CXX            "-fno-threadsafe-statics")
+set(CMAKE_EXE_LINKER_FLAGS      "-lm -lc -lstdc++ --verbose")
+
+function(enforce_compiler_version)
+endfunction()
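+
+# One way to select this toolchain explicitly when configuring a native build
+# (standard CMake mechanism; the path is relative to the repository root):
+#
+#     cmake -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/native-toolchain.cmake ..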
diff --git a/scripts/cmake/source_gen_utils.cmake b/scripts/cmake/source_gen_utils.cmake
new file mode 100644
index 0000000..8653016
--- /dev/null
+++ b/scripts/cmake/source_gen_utils.cmake
@@ -0,0 +1,270 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+set(SCRIPTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/scripts)
+
+##############################################################################
+# This function generates C++ files for images located in the directory it is
+# pointed at. NOTE: uses python
+##############################################################################
+function(generate_images_code input_dir src_out hdr_out img_size)
+
+    # Absolute paths for passing into python script
+    get_filename_component(input_dir_abs ${input_dir} ABSOLUTE)
+    get_filename_component(src_out_abs ${src_out} ABSOLUTE)
+    get_filename_component(hdr_out_abs ${hdr_out} ABSOLUTE)
+
+    message(STATUS "Generating image files from ${input_dir_abs}")
+    execute_process(
+        COMMAND ${PYTHON} ${SCRIPTS_DIR}/py/gen_rgb_cpp.py
+        --image_path ${input_dir_abs}
+        --source_folder_path ${src_out_abs}
+        --header_folder_path ${hdr_out_abs}
+        --image_size ${img_size} ${img_size}
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to generate image files.")
+    endif ()
+
+endfunction()
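+
+# Illustrative call (directory variables and image size are placeholders,
+# not values defined in this script):
+#
+#     generate_images_code("${SAMPLES_DIR}" "${SRC_GEN_DIR}" "${INC_GEN_DIR}" 224)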
+
+##############################################################################
+# This function generates C++ files for audio files located in the directory it is
+# pointed at. NOTE: uses python
+##############################################################################
+function(generate_audio_code input_dir src_out hdr_out s_rate_opt mono_opt off_opt duration_opt res_type_opt min_sample_opt)
+
+    # Absolute paths for passing into python script
+    get_filename_component(input_dir_abs ${input_dir} ABSOLUTE)
+    get_filename_component(src_out_abs ${src_out} ABSOLUTE)
+    get_filename_component(hdr_out_abs ${hdr_out} ABSOLUTE)
+
+    to_py_bool(mono_opt mono_opt_py)
+
+    message(STATUS "Generating audio files from ${input_dir_abs}")
+    execute_process(
+        COMMAND ${PYTHON} ${SCRIPTS_DIR}/py/gen_audio_cpp.py
+        --audio_path ${input_dir_abs}
+        --source_folder_path ${src_out_abs}
+        --header_folder_path ${hdr_out_abs}
+        --sampling_rate ${s_rate_opt}
+        --mono ${mono_opt_py}
+        --offset ${off_opt}
+        --duration ${duration_opt}
+        --res_type ${res_type_opt}
+        --min_samples ${min_sample_opt}
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to generate audio files.")
+    endif ()
+
+endfunction()
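+
+# Illustrative call, matching the positional parameters above (all values are
+# placeholders; the resample type is whatever the python script accepts):
+#
+#     generate_audio_code("${AUDIO_DIR}" "${SRC_GEN_DIR}" "${INC_GEN_DIR}"
+#                         16000 ON 0 0 kaiser_fast 16000)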
+
+##############################################################################
+# This function generates default empty input C++ files for applications with no
+# external input. Main use is for the inference runner. NOTE: uses python
+##############################################################################
+function(generate_default_input_code hdr_out)
+
+    # Absolute paths for passing into python script
+    get_filename_component(hdr_out_abs ${hdr_out} ABSOLUTE)
+
+    message(STATUS "Generating default input files")
+    execute_process(
+            COMMAND ${PYTHON} ${SCRIPTS_DIR}/py/gen_default_input_cpp.py
+            --header_folder_path ${hdr_out_abs}
+            RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to generate default input .")
+    endif ()
+
+endfunction()
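+
+# Illustrative call (the header output directory is a placeholder):
+#
+#     generate_default_input_code("${INC_GEN_DIR}")
+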
+##############################################################################
+# This function generates C++ files for tflite NN model files.
+# @param[in]    MODEL_PATH      path to a tflite file
+# @param[in]    DESTINATION     directory in which the output cc must be
+#                               placed
+# @param[in]    EXPRESSIONS     C++ code expressions to add to the generated file
+# @param[in]    NAMESPACE       model name space
+# NOTE: Uses python
+##############################################################################
+function(generate_tflite_code)
+
+    set(multiValueArgs EXPRESSIONS NAMESPACE)
+    set(oneValueArgs MODEL_PATH DESTINATION)
+    cmake_parse_arguments(PARSED "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
+
+    # Absolute paths for passing into python script
+    get_filename_component(ABS_MODEL_PATH ${PARSED_MODEL_PATH} ABSOLUTE)
+    get_filename_component(ABS_DESTINATION ${PARSED_DESTINATION} ABSOLUTE)
+
+    if (EXISTS ${ABS_MODEL_PATH})
+        message(STATUS "Using ${ABS_MODEL_PATH}")
+    else ()
+        message(FATAL_ERROR "${ABS_MODEL_PATH} not found!")
+    endif ()
+
+
+    foreach(expression ${PARSED_EXPRESSIONS})
+        set(py_arg_exp ${py_arg_exp} --expression=${expression})
+    endforeach()
+
+    foreach(name ${PARSED_NAMESPACE})
+        set(py_arg_exp ${py_arg_exp} --namespaces=${name})
+    endforeach()
+
+    execute_process(
+        COMMAND ${PYTHON} ${SCRIPTS_DIR}/py/gen_model_cpp.py
+        --tflite_path ${ABS_MODEL_PATH}
+        --output_dir ${ABS_DESTINATION} ${py_arg_exp}
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to generate model files.")
+    endif ()
+endfunction()
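+
+# Illustrative call using the keyword arguments parsed above (the model path
+# variable and namespace parts are placeholders):
+#
+#     generate_tflite_code(
+#         MODEL_PATH  "${DEFAULT_MODEL_PATH}"
+#         DESTINATION "${SRC_GEN_DIR}"
+#         NAMESPACE   "arm" "app" "nn_model")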
+
+
+##############################################################################
+# This function generates a C++ file for a given labels text file.
+# @param[in]    INPUT          Path to the label text file
+# @param[in]    DESTINATION_SRC directory in which the output cc must be
+#                               placed
+# @param[in]    DESTINATION_HDR directory in which the output h file must be
+#                               placed
+# @param[in]    OUTPUT_FILENAME Base name of the generated files (no extension)
+# @param[in]    NAMESPACE       data name space
+# NOTE: Uses python
+##############################################################################
+function(generate_labels_code)
+
+    set(multiValueArgs NAMESPACE)
+    set(oneValueArgs INPUT DESTINATION_SRC DESTINATION_HDR OUTPUT_FILENAME)
+    cmake_parse_arguments(PARSED "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
+
+    # Absolute paths for passing into python script
+    get_filename_component(input_abs ${PARSED_INPUT} ABSOLUTE)
+    get_filename_component(src_out_abs ${PARSED_DESTINATION_SRC} ABSOLUTE)
+    get_filename_component(hdr_out_abs ${PARSED_DESTINATION_HDR} ABSOLUTE)
+
+    message(STATUS "Generating labels file from ${PARSED_INPUT}")
+    file(REMOVE "${hdr_out_abs}/${PARSED_OUTPUT_FILENAME}.hpp")
+    file(REMOVE "${src_out_abs}/${PARSED_OUTPUT_FILENAME}.cc")
+
+    foreach(name ${PARSED_NAMESPACE})
+        set(py_arg_exp ${py_arg_exp} --namespaces=${name})
+    endforeach()
+
+    message(STATUS "writing to ${hdr_out_abs}/${PARSED_OUTPUT_FILENAME}.hpp and ${src_out_abs}/${PARSED_OUTPUT_FILENAME}.cc")
+    execute_process(
+        COMMAND ${PYTHON} ${SCRIPTS_DIR}/py/gen_labels_cpp.py
+        --labels_file ${input_abs}
+        --source_folder_path ${src_out_abs}
+        --header_folder_path ${hdr_out_abs}
+        --output_file_name ${PARSED_OUTPUT_FILENAME} ${py_arg_exp}
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to generate label files.")
+    endif ()
+endfunction()
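+
+# Illustrative call (paths, output file name and namespace parts are
+# placeholders):
+#
+#     generate_labels_code(
+#         INPUT           "${LABELS_TXT_FILE}"
+#         DESTINATION_SRC "${SRC_GEN_DIR}"
+#         DESTINATION_HDR "${INC_GEN_DIR}"
+#         OUTPUT_FILENAME "Labels"
+#         NAMESPACE       "arm" "app")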
+
+
+##############################################################################
+# This function generates C++ data files for tests from the npy files located
+# in the directory it is pointed at.
+# @param[in]    INPUT_DIR       directory containing the npy files
+# @param[in]    DESTINATION_SRC directory in which the output cc must be
+#                               placed
+# @param[in]    DESTINATION_HDR directory in which the output h file must be
+#                               placed
+# @param[in]    USECASE         name of the sub-usecase
+# @param[in]    NAMESPACE       data name space
+# NOTE: Uses python
+##############################################################################
+function(generate_test_data_code)
+
+    set(multiValueArgs NAMESPACE)
+    set(oneValueArgs INPUT_DIR DESTINATION_SRC DESTINATION_HDR USECASE)
+    cmake_parse_arguments(PARSED "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
+
+    # Absolute paths for passing into python script
+    get_filename_component(input_dir_abs ${PARSED_INPUT_DIR} ABSOLUTE)
+    get_filename_component(src_out_abs ${PARSED_DESTINATION_SRC} ABSOLUTE)
+    get_filename_component(hdr_out_abs ${PARSED_DESTINATION_HDR} ABSOLUTE)
+
+    foreach(name ${PARSED_NAMESPACE})
+        set(py_arg_exp ${py_arg_exp} --namespaces=${name})
+    endforeach()
+
+    message(STATUS "Generating test ifm and ofm files from ${input_dir_abs}")
+    execute_process(
+        COMMAND ${PYTHON} ${SCRIPTS_DIR}/py/gen_test_data_cpp.py
+        --data_folder_path ${input_dir_abs}
+        --source_folder_path ${src_out_abs}
+        --header_folder_path ${hdr_out_abs}
+        --usecase ${PARSED_USECASE}
+        ${py_arg_exp}
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to generate test data files.")
+    endif ()
+
+endfunction()
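+
+# Illustrative call (directories, use-case name and namespace parts are
+# placeholders):
+#
+#     generate_test_data_code(
+#         INPUT_DIR       "${TEST_NPY_DIR}"
+#         DESTINATION_SRC "${SRC_GEN_DIR}"
+#         DESTINATION_HDR "${INC_GEN_DIR}"
+#         USECASE         "kws"
+#         NAMESPACE       "test" "kws")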
+
+
+##############################################################################
+# Function to prepare a python virtual environment for running the functions
+# outlined above.
+##############################################################################
+function(setup_source_generator)
+    if (${CMAKE_HOST_WIN32})
+        # On Windows, the Python 3 executable is named python.exe
+        set(PY_EXEC python)
+        set(PYTHON ${CMAKE_BINARY_DIR}/pyenv/Scripts/${PY_EXEC})
+    else()
+        set(PY_EXEC python3)
+        set(PYTHON ${CMAKE_BINARY_DIR}/pyenv/bin/${PY_EXEC})
+    endif()
+    set(PYTHON ${PYTHON} PARENT_SCOPE)
+
+    if (EXISTS ${PYTHON})
+        message(STATUS "Using existing python at ${PYTHON}")
+        return()
+    endif ()
+    message(STATUS "Configuring python environment at ${PYTHON}")
+    execute_process(
+        COMMAND ${PY_EXEC} -m venv ${CMAKE_BINARY_DIR}/pyenv
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to setup python3 environment")
+    endif ()
+
+    execute_process(COMMAND ${PYTHON} -m pip install wheel)
+
+    execute_process(
+        COMMAND ${PYTHON} -m pip install -r ${SCRIPTS_DIR}/py/requirements.txt
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to setup python3 environment")
+    endif ()
+endfunction()
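+
+# Typical flow, as a sketch: call the generator setup once early on, after which
+# the generate_* helpers above can rely on ${PYTHON} pointing at the interpreter
+# inside the build tree's virtual environment:
+#
+#     setup_source_generator()
+#     generate_default_input_code("${INC_GEN_DIR}")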
diff --git a/scripts/cmake/subsystem-profiles/corstone-sse-200.cmake b/scripts/cmake/subsystem-profiles/corstone-sse-200.cmake
new file mode 100644
index 0000000..8e2cd98
--- /dev/null
+++ b/scripts/cmake/subsystem-profiles/corstone-sse-200.cmake
@@ -0,0 +1,255 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# CMake configuration file for the MPS3 peripheral memory map, as per the SSE-200 design
+###################################################################################################
+#                              Application specific config                                        #
+###################################################################################################
+
+# This parameter is based on the linker/scatter script for SSE-200. Do not change this parameter
+# in isolation.
+set(ACTIVATION_BUF_SRAM_SZ "0x00200000" CACHE STRING "Maximum SRAM size for activation buffers")
+set(DESIGN_NAME            "SSE-200"    CACHE STRING "Design name")
+###################################################################################################
+#                                         Mem sizes                                               #
+###################################################################################################
+set(ITCM_SIZE             "0x00100000" CACHE STRING "ITCM size:         1 MiB")
+set(DTCM_BLK_SIZE         "0x00100000" CACHE STRING "DTCM size:         1 MiB, 4 banks")
+set(BRAM_SIZE             "0x00200000" CACHE STRING "BRAM size:         2 MiB")
+set(QSPI_SRAM_SIZE        "0x00800000" CACHE STRING "QSPI Flash size:   8 MiB")
+set(DDR4_BLK_SIZE         "0x10000000" CACHE STRING "DDR4 block size: 256 MiB")
+
+###################################################################################################
+#                                         Base addresses                                          #
+###################################################################################################
+set(ITCM_BASE_NS          "0x00000000" CACHE STRING "Instruction TCM Non-Secure base address")
+set(BRAM_BASE_NS          "0x01000000" CACHE STRING "CODE SRAM Non-Secure base address")
+set(DTCM0_BASE_NS         "0x20000000" CACHE STRING "Data TCM block 0 Non-Secure base address")
+set(DTCM1_BASE_NS         "0x20100000" CACHE STRING "Data TCM block 1 Non-Secure base address")
+set(DTCM2_BASE_NS         "0x20200000" CACHE STRING "Data TCM block 2 Non-Secure base address")
+set(DTCM3_BASE_NS         "0x20300000" CACHE STRING "Data TCM block 3 Non-Secure base address")
+set(QSPI_SRAM_BASE_NS     "0x28000000" CACHE STRING "QSPI SRAM Non-Secure base address")
+set(DDR4_BLK0_BASE_NS     "0x60000000" CACHE STRING "DDR4 block 0 Non-Secure base address")
+set(DDR4_BLK1_BASE_NS     "0x80000000" CACHE STRING "DDR4 block 1 Non-Secure base address")
+set(DDR4_BLK2_BASE_NS     "0xA0000000" CACHE STRING "DDR4 block 2 Non-Secure base address")
+set(DDR4_BLK3_BASE_NS     "0xC0000000" CACHE STRING "DDR4 block 3 Non-Secure base address")
+
+set(ITCM_BASE_S           "0x10000000" CACHE STRING "Instruction TCM Secure base address")
+set(BRAM_BASE_S           "0x11000000" CACHE STRING "CODE SRAM Secure base address")
+set(DTCM0_BASE_S          "0x30000000" CACHE STRING "Data TCM block 0 Secure base address")
+set(DTCM1_BASE_S          "0x30100000" CACHE STRING "Data TCM block 1 Secure base address")
+set(DTCM2_BASE_S          "0x30200000" CACHE STRING "Data TCM block 2 Secure base address")
+set(DTCM3_BASE_S          "0x30300000" CACHE STRING "Data TCM block 3 Secure base address")
+set(DDR4_BLK0_BASE_S      "0x70000000" CACHE STRING "DDR4 block 0 Secure base address")
+set(DDR4_BLK1_BASE_S      "0x90000000" CACHE STRING "DDR4 block 1 Secure base address")
+set(DDR4_BLK2_BASE_S      "0xB0000000" CACHE STRING "DDR4 block 2 Secure base address")
+set(DDR4_BLK3_BASE_S      "0xD0000000" CACHE STRING "DDR4 block 3 Secure base address")
+
+set(CMSDK_GPIO0_BASE      "0x41100000" CACHE STRING "User GPIO 0 Base Address")
+set(CMSDK_GPIO1_BASE      "0x41101000" CACHE STRING "User GPIO 1 Base Address")
+set(CMSDK_GPIO2_BASE      "0x41102000" CACHE STRING "User GPIO 2 Base Address")
+set(CMSDK_GPIO3_BASE      "0x41103000" CACHE STRING "User GPIO 3 Base Address")
+
+if (ETHOS_U55_ENABLED)
+    set(ETHOS_U55_BASE       "0x41700000" CACHE STRING "Ethos-U55 base address")
+    set(ETHOS_U55_TA0_BASE   "0x41701000" CACHE STRING "Ethos-U55's timing adapter 0 base address")
+    set(ETHOS_U55_TA1_BASE   "0x41701200" CACHE STRING "Ethos-U55's timing adapter 1 base address")
+endif ()
+
+set(MPS3_I2C0_BASE        "0x41200000" CACHE STRING "Touch Screen I2C Base Address ")
+set(MPS3_I2C1_BASE        "0x41201000" CACHE STRING "Audio Interface I2C Base Address ")
+set(MPS3_SSP2_BASE        "0x41202000" CACHE STRING "ADC SPI PL022 Base Address")
+set(MPS3_SSP3_BASE        "0x41203000" CACHE STRING "Shield 0 SPI PL022 Base Address")
+
+set(MPS3_SSP4_BASE        "0x41204000" CACHE STRING "Shield 1 SPI PL022 Base Address")
+set(MPS3_I2C2_BASE        "0x41205000" CACHE STRING "Shield 0 SBCon Base Address ")
+set(MPS3_I2C3_BASE        "0x41206000" CACHE STRING "Shield 1 SBCon Base Address ")
+
+set(MPS3_I2C4_BASE        "0x41207000" CACHE STRING "HDMI I2C SBCon Base Address ")
+set(MPS3_I2C5_BASE        "0x41208000" CACHE STRING "DDR EPROM I2C SBCon Base Address ")
+set(MPS3_SCC_BASE         "0x41300000" CACHE STRING "SCC Base Address ")
+set(MPS3_AAIC_I2S_BASE    "0x41301000" CACHE STRING "Audio Interface I2S Base Address ")
+set(MPS3_FPGAIO_BASE      "0x41302000" CACHE STRING "FPGA IO Base Address ")
+set(CMSDK_UART0_BASE      "0x41303000" CACHE STRING "UART 0 Base Address ")
+set(CMSDK_UART1_BASE      "0x41304000" CACHE STRING "UART 1 Base Address ")
+set(CMSDK_UART2_BASE      "0x41305000" CACHE STRING "UART 2 Base Address ")
+set(CMSDK_UART3_BASE      "0x41306000" CACHE STRING "UART 3 Base Address Shield 0")
+
+set(CMSDK_UART4_BASE      "0x41307000" CACHE STRING "UART 4 Base Address Shield 1")
+set(CMSDK_UART5_BASE      "0x41308000" CACHE STRING "UART 5 Base Address ")
+set(HDMI_AUDIO_BASE       "0x41309000" CACHE STRING "HDMI AUDIO Base Address ")
+set(CLCD_CONFIG_BASE      "0x4130A000" CACHE STRING "CLCD CONFIG Base Address ")
+set(RTC_BASE              "0x4130B000" CACHE STRING "RTC Base address ")
+set(SMSC9220_BASE         "0x41400000" CACHE STRING "Ethernet SMSC9220 Base Address ")
+set(USB_BASE              "0x41500000" CACHE STRING "USB Base Address ")
+
+set(MPS3_eMMC_BASE        "0x41702000" CACHE STRING "User eMMC Base Address")
+set(USER_BASE             "0x41703000" CACHE STRING "User ? Base Address ")
+
+set(QSPI_XIP_BASE         "0x41800000" CACHE STRING "QSPI XIP config Base Address ")
+set(QSPI_WRITE_BASE       "0x41801000" CACHE STRING "QSPI write config Base Address ")
+
+set(SEC_CMSDK_GPIO0_BASE  "0x51100000" CACHE STRING "User GPIO 0 Base Address")
+set(SEC_CMSDK_GPIO1_BASE  "0x51101000" CACHE STRING "User GPIO 0 Base Address")
+set(SEC_CMSDK_GPIO2_BASE  "0x51102000" CACHE STRING "User GPIO 0 Base Address")
+set(SEC_CMSDK_GPIO3_BASE  "0x51103000" CACHE STRING "User GPIO 0 Base Address")
+
+set(SEC_MPS3_I2C0_BASE    "0x51200000" CACHE STRING "Touch Screen I2C Base Address ")
+set(SEC_MPS3_I2C1_BASE    "0x51201000" CACHE STRING "Audio Interface I2C Base Address ")
+set(SEC_MPS3_SSP2_BASE    "0x51202000" CACHE STRING "ADC SPI PL022 Base Address")
+set(SEC_MPS3_SSP3_BASE    "0x51203000" CACHE STRING "Shield 0 SPI PL022 Base Address")
+
+set(SEC_MPS3_SSP4_BASE    "0x51204000" CACHE STRING "Shield 1 SPI PL022 Base Address")
+set(SEC_MPS3_I2C2_BASE    "0x51205000" CACHE STRING "Shield 0 SBCon Base Address ")
+set(SEC_MPS3_I2C3_BASE    "0x51206000" CACHE STRING "Shield 1 SBCon Base Address ")
+
+set(SEC_MPS3_I2C4_BASE    "0x51207000" CACHE STRING "HDMI I2C SBCon Base Address ")
+set(SEC_MPS3_I2C5_BASE    "0x51208000" CACHE STRING "DDR EPROM I2C SBCon Base Address ")
+set(SEC_MPS3_SCC_BASE     "0x51300000" CACHE STRING "SCC Base Address ")
+set(SEC_MPS3_AAIC_I2S_BASE     "0x51301000" CACHE STRING "Audio Interface I2S Base Address ")
+set(SEC_MPS3_FPGAIO_BASE   "0x51302000" CACHE STRING "FPGA IO Base Address ")
+set(SEC_CMSDK_UART0_BASE   "0x51303000" CACHE STRING "UART 0 Base Address ")
+set(SEC_CMSDK_UART1_BASE   "0x51304000" CACHE STRING "UART 1 Base Address ")
+set(SEC_CMSDK_UART2_BASE   "0x51305000" CACHE STRING "UART 2 Base Address ")
+set(SEC_CMSDK_UART3_BASE   "0x51306000" CACHE STRING "UART 3 Base Address Shield 0")
+
+set(SEC_CMSDK_UART4_BASE   "0x51307000" CACHE STRING "UART 4 Base Address Shield 1")
+set(SEC_CMSDK_UART5_BASE   "0x51308000" CACHE STRING "UART 5 Base Address ")
+set(SEC_HDMI_AUDIO_BASE    "0x51309000" CACHE STRING "HDMI AUDIO Base Address ")
+set(SEC_CLCD_CONFIG_BASE   "0x5130A000" CACHE STRING "CLCD CONFIG Base Address ")
+set(SEC_RTC_BASE           "0x5130B000" CACHE STRING "RTC Base address ")
+set(SEC_SMSC9220_BASE      "0x51400000" CACHE STRING "Ethernet SMSC9220 Base Address ")
+set(SEC_USB_BASE           "0x51500000" CACHE STRING "USB Base Address ")
+
+if (ETHOS_U55_ENABLED)
+    set(SEC_ETHOS_U55_BASE        "0x51700000" CACHE STRING "Ethos-U55 base address")
+    set(SEC_ETHOS_U55_TA0_BASE    "0x51701000" CACHE STRING "Ethos-U55's timing adapter 0 base address")
+    set(SEC_ETHOS_U55_TA1_BASE    "0x51701200" CACHE STRING "Ethos-U55's timing adapter 1 base address")
+endif ()
+
+set(SEC_MMC_BASE          "0x51702000" CACHE STRING "User eMMC Base Address")
+set(SEC_USER_BASE         "0x51703000" CACHE STRING "User ? Base Address ")
+
+set(SEC_QSPI_XIP_BASE     "0x51800000" CACHE STRING "QSPI XIP config Base Address ")
+set(SEC_QSPI_WRITE_BASE   "0x51801000" CACHE STRING "QSPI write config Base Address ")
+
+###################################################################################################
+#                                           IRQ numbers                                           #
+###################################################################################################
+set(NONSEC_WATCHDOG_RESET_IRQn    " 0" CACHE STRING " Non-Secure Watchdog Reset Interrupt")
+set(NONSEC_WATCHDOG_IRQn          " 1" CACHE STRING " Non-Secure Watchdog Interrupt         ")
+set(S32K_TIMER_IRQn               " 2" CACHE STRING " S32K Timer Interrupt                  ")
+set(TIMER0_IRQn                   " 3" CACHE STRING " TIMER 0 Interrupt                     ")
+set(TIMER1_IRQn                   " 4" CACHE STRING " TIMER 1 Interrupt                     ")
+set(DUALTIMER_IRQn                " 5" CACHE STRING " Dual Timer Interrupt                  ")
+set(MPC_IRQn                      " 9" CACHE STRING " MPC Combined (Secure) Interrupt       ")
+set(PPC_IRQn                      "10" CACHE STRING " PPC Combined (Secure) Interrupt       ")
+set(MSC_IRQn                      "11" CACHE STRING " MSC Combined (Secure) Interrupt       ")
+set(BRIDGE_ERROR_IRQn             "12" CACHE STRING " Bridge Error Combined (Secure) Interrupt ")
+
+set(UARTRX0_IRQn                  "32" CACHE STRING " UART 0 RX Interrupt                   ")
+set(UARTTX0_IRQn                  "33" CACHE STRING " UART 0 TX Interrupt                   ")
+set(UARTRX1_IRQn                  "34" CACHE STRING " UART 1 RX Interrupt                   ")
+set(UARTTX1_IRQn                  "35" CACHE STRING " UART 1 TX Interrupt                   ")
+set(UARTRX2_IRQn                  "36" CACHE STRING " UART 2 RX Interrupt                   ")
+set(UARTTX2_IRQn                  "37" CACHE STRING " UART 2 TX Interrupt                   ")
+set(UARTRX3_IRQn                  "38" CACHE STRING " UART 3 RX Interrupt                   ")
+set(UARTTX3_IRQn                  "39" CACHE STRING " UART 3 TX Interrupt                   ")
+set(UARTRX4_IRQn                  "40" CACHE STRING " UART 4 RX Interrupt                   ")
+set(UARTTX4_IRQn                  "41" CACHE STRING " UART 4 TX Interrupt                   ")
+set(UART0_IRQn                    "42" CACHE STRING " UART 0 combined Interrupt             ")
+set(UART1_IRQn                    "43" CACHE STRING " UART 1 combined Interrupt             ")
+set(UART2_IRQn                    "44" CACHE STRING " UART 2 combined Interrupt             ")
+set(UART3_IRQn                    "45" CACHE STRING " UART 3 combined Interrupt             ")
+set(UART4_IRQn                    "46" CACHE STRING " UART 4 combined Interrupt             ")
+set(UARTOVF_IRQn                  "47" CACHE STRING " UART 0,1,2,3,4 Overflow Interrupt     ")
+set(ETHERNET_IRQn                 "48" CACHE STRING " Ethernet Interrupt                    ")
+set(I2S_IRQn                      "49" CACHE STRING " I2S Interrupt                         ")
+set(TSC_IRQn                      "50" CACHE STRING " Touch Screen Interrupt                ")
+set(SPI2_IRQn                     "52" CACHE STRING " SPI 2 Interrupt                       ")
+set(SPI3_IRQn                     "53" CACHE STRING " SPI 3 Interrupt                       ")
+set(SPI4_IRQn                     "54" CACHE STRING " SPI 4 Interrupt                       ")
+
+if (ETHOS_U55_ENABLED)
+    if (CPU_CORTEX_M55 EQUAL 1)
+        set(EthosU_IRQn           "55" CACHE STRING " Ethos-U55 Interrupt                   ")
+    elseif (CPU_CORTEX_M33 EQUAL 1)
+        set(EthosU_IRQn           "67" CACHE STRING " Ethos-U55 Interrupt                   ")
+    endif()
+endif ()
+
+set(GPIO0_IRQn                    "68" CACHE STRING " GPIO 0 Combined Interrupt             ")
+set(GPIO1_IRQn                    "69" CACHE STRING " GPIO 1 Combined Interrupt             ")
+set(GPIO2_IRQn                    "70" CACHE STRING " GPIO 2 Combined Interrupt             ")
+set(GPIO3_IRQn                    "71" CACHE STRING " GPIO 3 Combined Interrupt             ")
+
+set(GPIO0_0_IRQn                  "72" CACHE STRING "")
+set(GPIO0_1_IRQn                  "73" CACHE STRING "")
+set(GPIO0_2_IRQn                  "74" CACHE STRING "")
+set(GPIO0_3_IRQn                  "75" CACHE STRING "")
+set(GPIO0_4_IRQn                  "76" CACHE STRING "")
+set(GPIO0_5_IRQn                  "77" CACHE STRING "")
+set(GPIO0_6_IRQn                  "78" CACHE STRING "")
+set(GPIO0_7_IRQn                  "79" CACHE STRING "")
+set(GPIO0_8_IRQn                  "80" CACHE STRING "")
+set(GPIO0_9_IRQn                  "81" CACHE STRING "")
+set(GPIO0_10_IRQn                 "82" CACHE STRING "")
+set(GPIO0_11_IRQn                 "83" CACHE STRING "")
+set(GPIO0_12_IRQn                 "84" CACHE STRING "")
+set(GPIO0_13_IRQn                 "85" CACHE STRING "")
+set(GPIO0_14_IRQn                 "86" CACHE STRING "")
+set(GPIO0_15_IRQn                 "87" CACHE STRING "")
+set(GPIO1_0_IRQn                  "88" CACHE STRING "")
+set(GPIO1_1_IRQn                  "89" CACHE STRING "")
+set(GPIO1_2_IRQn                  "90" CACHE STRING "")
+set(GPIO1_3_IRQn                  "91" CACHE STRING "")
+set(GPIO1_4_IRQn                  "92" CACHE STRING "")
+set(GPIO1_5_IRQn                  "93" CACHE STRING "")
+set(GPIO1_6_IRQn                  "94" CACHE STRING "")
+set(GPIO1_7_IRQn                  "95" CACHE STRING "")
+set(GPIO1_8_IRQn                  "96" CACHE STRING "")
+set(GPIO1_9_IRQn                  "97" CACHE STRING "")
+set(GPIO1_10_IRQn                 "98" CACHE STRING "")
+set(GPIO1_11_IRQn                 "99" CACHE STRING "")
+set(GPIO1_12_IRQn                 "100" CACHE STRING "")
+set(GPIO1_13_IRQn                 "101" CACHE STRING "")
+set(GPIO1_14_IRQn                 "102" CACHE STRING "")
+set(GPIO1_15_IRQn                 "103" CACHE STRING "")
+set(GPIO2_0_IRQn                  "104" CACHE STRING "")
+set(GPIO2_1_IRQn                  "105" CACHE STRING "")
+set(GPIO2_2_IRQn                  "106" CACHE STRING "")
+set(GPIO2_3_IRQn                  "107" CACHE STRING "")
+set(GPIO2_4_IRQn                  "108" CACHE STRING "")
+set(GPIO2_5_IRQn                  "109" CACHE STRING "")
+set(GPIO2_6_IRQn                  "110" CACHE STRING "")
+set(GPIO2_7_IRQn                  "111" CACHE STRING "")
+set(GPIO2_8_IRQn                  "112" CACHE STRING "")
+set(GPIO2_9_IRQn                  "113" CACHE STRING "")
+set(GPIO2_10_IRQn                 "114" CACHE STRING "")
+set(GPIO2_11_IRQn                 "115" CACHE STRING "")
+set(GPIO2_12_IRQn                 "116" CACHE STRING "")
+set(GPIO2_13_IRQn                 "117" CACHE STRING "")
+set(GPIO2_14_IRQn                 "118" CACHE STRING "")
+set(GPIO2_15_IRQn                 "119" CACHE STRING "")
+set(GPIO3_0_IRQn                  "120" CACHE STRING "")
+set(GPIO3_1_IRQn                  "121" CACHE STRING "")
+set(GPIO3_2_IRQn                  "122" CACHE STRING "")
+set(GPIO3_3_IRQn                  "123" CACHE STRING "")
+set(UARTRX5_IRQn                  "124" CACHE STRING "UART 5 RX Interrupt")
+set(UARTTX5_IRQn                  "125" CACHE STRING "UART 5 TX Interrupt")
+set(UART5_IRQn                    "126" CACHE STRING "UART 5 combined Interrupt")
+set(HDCLCD_IRQn                   "127" CACHE STRING "HDCLCD Interrupt")
diff --git a/scripts/cmake/subsystem-profiles/corstone-sse-300.cmake b/scripts/cmake/subsystem-profiles/corstone-sse-300.cmake
new file mode 100644
index 0000000..8b565fe
--- /dev/null
+++ b/scripts/cmake/subsystem-profiles/corstone-sse-300.cmake
@@ -0,0 +1,309 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# CMake configuration file for the MPS3 peripheral memory map, as per the SSE-300 design
+###################################################################################################
+#                              Application specific config                                        #
+###################################################################################################
+
+# This parameter is based on the linker/scatter script for SSE-300. Do not change this parameter
+# in isolation.
+set(ACTIVATION_BUF_SRAM_SZ "0x00400000" CACHE STRING "Maximum SRAM size for activation buffers")
+set(DESIGN_NAME            "Arm Corstone-300 (SSE-300)" CACHE STRING "Design name")
+
+###################################################################################################
+#                                         Mem sizes                                               #
+###################################################################################################
+set(ITCM_SIZE             "0x00080000" CACHE STRING "ITCM size:       512 kiB")
+set(DTCM_BLK_SIZE         "0x00020000" CACHE STRING "DTCM size:       128 kiB, 4 banks")
+set(BRAM_SIZE             "0x00200000" CACHE STRING "BRAM size:         2 MiB")
+set(ISRAM0_SIZE           "0x00200000" CACHE STRING "ISRAM0 size:       2 MiB")
+set(ISRAM1_SIZE           "0x00200000" CACHE STRING "ISRAM1 size:       2 MiB")
+set(QSPI_SRAM_SIZE        "0x00800000" CACHE STRING "QSPI Flash size:   8 MiB")
+set(DDR4_BLK_SIZE         "0x10000000" CACHE STRING "DDR4 block size: 256 MiB")
+
+###################################################################################################
+#                                Base addresses for memory regions                                #
+###################################################################################################
+set(ITCM_BASE_NS          "0x00000000" CACHE STRING "Instruction TCM Non-Secure base address")
+set(BRAM_BASE_NS          "0x01000000" CACHE STRING "CODE SRAM Non-Secure base address")
+set(DTCM0_BASE_NS         "0x20000000" CACHE STRING "Data TCM block 0 Non-Secure base address")
+set(DTCM1_BASE_NS         "0x20020000" CACHE STRING "Data TCM block 1 Non-Secure base address")
+set(DTCM2_BASE_NS         "0x20040000" CACHE STRING "Data TCM block 2 Non-Secure base address")
+set(DTCM3_BASE_NS         "0x20060000" CACHE STRING "Data TCM block 3 Non-Secure base address")
+set(ISRAM0_BASE_NS        "0x21000000" CACHE STRING "Internal SRAM Area Non-Secure base address")
+set(ISRAM1_BASE_NS        "0x21200000" CACHE STRING "Internal SRAM Area Non-Secure base address")
+set(QSPI_SRAM_BASE_NS     "0x28000000" CACHE STRING "QSPI SRAM Non-Secure base address")
+set(DDR4_BLK0_BASE_NS     "0x60000000" CACHE STRING "DDR4 block 0 Non-Secure base address")
+set(DDR4_BLK1_BASE_NS     "0x80000000" CACHE STRING "DDR4 block 1 Non-Secure base address")
+set(DDR4_BLK2_BASE_NS     "0xA0000000" CACHE STRING "DDR4 block 2 Non-Secure base address")
+set(DDR4_BLK3_BASE_NS     "0xC0000000" CACHE STRING "DDR4 block 3 Non-Secure base address")
+
+set(ITCM_BASE_S           "0x10000000" CACHE STRING "Instruction TCM Secure base address")
+set(BRAM_BASE_S           "0x11000000" CACHE STRING "CODE SRAM Secure base address")
+set(DTCM0_BASE_S          "0x30000000" CACHE STRING "Data TCM block 0 Secure base address")
+set(DTCM1_BASE_S          "0x30020000" CACHE STRING "Data TCM block 1 Secure base address")
+set(DTCM2_BASE_S          "0x30040000" CACHE STRING "Data TCM block 2 Secure base address")
+set(DTCM3_BASE_S          "0x30060000" CACHE STRING "Data TCM block 3 Secure base address")
+set(ISRAM0_BASE_S         "0x31000000" CACHE STRING "Internal SRAM Area Secure base address")
+set(ISRAM1_BASE_S         "0x31200000" CACHE STRING "Internal SRAM Area Secure base address")
+set(DDR4_BLK0_BASE_S      "0x70000000" CACHE STRING "DDR4 block 0 Secure base address")
+set(DDR4_BLK1_BASE_S      "0x90000000" CACHE STRING "DDR4 block 1 Secure base address")
+set(DDR4_BLK2_BASE_S      "0xB0000000" CACHE STRING "DDR4 block 2 Secure base address")
+set(DDR4_BLK3_BASE_S      "0xD0000000" CACHE STRING "DDR4 block 3 Secure base address")
+
+
+###################################################################################################
+#                     Base addresses for peripherals - non secure                                 #
+###################################################################################################
+set(CMSDK_GPIO0_BASE      "0x41100000" CACHE STRING "User GPIO 0 Base Address (4KB)")
+set(CMSDK_GPIO1_BASE      "0x41101000" CACHE STRING "User GPIO 1 Base Address (4KB)")
+set(CMSDK_GPIO2_BASE      "0x41102000" CACHE STRING "User GPIO 2 Base Address (4KB)")
+set(CMSDK_GPIO3_BASE      "0x41103000" CACHE STRING "User GPIO 3 Base Address (4KB)")
+
+set(AHB_USER0_BASE        "0x41104000" CACHE STRING "AHB USER 0 Base Address (4KB)")
+set(AHB_USER1_BASE        "0x41105000" CACHE STRING "AHB USER 1 Base Address (4KB)")
+set(AHB_USER2_BASE        "0x41106000" CACHE STRING "AHB USER 2 Base Address (4KB)")
+set(AHB_USER3_BASE        "0x41107000" CACHE STRING "AHB USER 3 Base Address (4KB)")
+
+set(DMA0_BASE             "0x41200000" CACHE STRING "DMA0 (4KB)")
+set(DMA1_BASE             "0x41201000" CACHE STRING "DMA1 (4KB)")
+set(DMA2_BASE             "0x41202000" CACHE STRING "DMA2 (4KB)")
+set(DMA3_BASE             "0x41203000" CACHE STRING "DMA3 (4KB)")
+
+set(SMSC9220_BASE         "0x41400000" CACHE STRING "Ethernet SMSC9220 Base Address (1MB)")
+set(USB_BASE              "0x41500000" CACHE STRING "USB Base Address (1MB)")
+
+set(USER_APB0_BASE        "0x41700000" CACHE STRING "User APB0")
+set(USER_APB1_BASE        "0x41701000" CACHE STRING "User APB1")
+set(USER_APB2_BASE        "0x41702000" CACHE STRING "User APB2")
+set(USER_APB3_BASE        "0x41703000" CACHE STRING "User APB3")
+
+set(QSPI_XIP_BASE         "0x41800000" CACHE STRING "QSPI XIP config Base Address ")
+set(QSPI_WRITE_BASE       "0x41801000" CACHE STRING "QSPI write config Base Address ")
+
+if (ETHOS_U55_ENABLED)
+    set(ETHOS_U55_BASE        "0x48102000" CACHE STRING "Ethos-U55 base address")
+    set(ETHOS_U55_TA0_BASE    "0x48103000" CACHE STRING "Ethos-U55's timing adapter 0 base address")
+    set(ETHOS_U55_TA1_BASE    "0x48103200" CACHE STRING "Ethos-U55's timing adapter 1 base address")
+endif (ETHOS_U55_ENABLED)
+
+set(MPS3_I2C0_BASE        "0x49200000" CACHE STRING "Touch Screen I2C Base Address ")
+set(MPS3_I2C1_BASE        "0x49201000" CACHE STRING "Audio Interface I2C Base Address ")
+set(MPS3_SSP2_BASE        "0x49202000" CACHE STRING "ADC SPI PL022 Base Address")
+set(MPS3_SSP3_BASE        "0x49203000" CACHE STRING "Shield 0 SPI PL022 Base Address")
+set(MPS3_SSP4_BASE        "0x49204000" CACHE STRING "Shield 1 SPI PL022 Base Address")
+set(MPS3_I2C2_BASE        "0x49205000" CACHE STRING "Shield 0 SBCon Base Address ")
+set(MPS3_I2C3_BASE        "0x49206000" CACHE STRING "Shield 1 SBCon Base Address ")
+
+set(USER_APB_BASE         "0x49207000" CACHE STRING "User APB")
+set(MPS3_I2C5_BASE        "0x49208000" CACHE STRING "DDR EPROM I2C SBCon Base Address ")
+
+set(MPS3_SCC_BASE         "0x49300000" CACHE STRING "SCC Base Address ")
+set(MPS3_AAIC_I2S_BASE    "0x49301000" CACHE STRING "Audio Interface I2S Base Address ")
+set(MPS3_FPGAIO_BASE      "0x49302000" CACHE STRING "FPGA IO Base Address ")
+
+set(CMSDK_UART0_BASE      "0x49303000" CACHE STRING "UART 0 Base Address ")
+set(CMSDK_UART1_BASE      "0x49304000" CACHE STRING "UART 1 Base Address ")
+set(CMSDK_UART2_BASE      "0x49305000" CACHE STRING "UART 2 Base Address ")
+set(CMSDK_UART3_BASE      "0x49306000" CACHE STRING "UART 3 Base Address Shield 0")
+set(CMSDK_UART4_BASE      "0x49307000" CACHE STRING "UART 4 Base Address Shield 1")
+set(CMSDK_UART5_BASE      "0x49308000" CACHE STRING "UART 5 Base Address ")
+
+set(CLCD_CONFIG_BASE      "0x4930A000" CACHE STRING "CLCD CONFIG Base Address ")
+set(RTC_BASE              "0x4930B000" CACHE STRING "RTC Base address ")
+
+###################################################################################################
+#                     Base addresses for peripherals - secure                                     #
+###################################################################################################
+set(SEC_CMSDK_GPIO0_BASE   "0x51100000" CACHE STRING "User GPIO 0 Base Address (4KB)")
+set(SEC_CMSDK_GPIO1_BASE   "0x51101000" CACHE STRING "User GPIO 1 Base Address (4KB)")
+set(SEC_CMSDK_GPIO2_BASE   "0x51102000" CACHE STRING "User GPIO 2 Base Address (4KB)")
+set(SEC_CMSDK_GPIO3_BASE   "0x51103000" CACHE STRING "User GPIO 3 Base Address (4KB)")
+
+set(SEC_AHB_USER0_BASE     "0x51104000" CACHE STRING "AHB USER 0 Base Address (4KB)")
+set(SEC_AHB_USER1_BASE     "0x51105000" CACHE STRING "AHB USER 1 Base Address (4KB)")
+set(SEC_AHB_USER2_BASE     "0x51106000" CACHE STRING "AHB USER 2 Base Address (4KB)")
+set(SEC_AHB_USER3_BASE     "0x51107000" CACHE STRING "AHB USER 3 Base Address (4KB)")
+
+set(SEC_DMA0_BASE          "0x51200000" CACHE STRING "DMA0 (4KB)")
+set(SEC_DMA1_BASE          "0x51201000" CACHE STRING "DMA1 (4KB)")
+set(SEC_DMA2_BASE          "0x51202000" CACHE STRING "DMA2 (4KB)")
+set(SEC_DMA3_BASE          "0x51203000" CACHE STRING "DMA3 (4KB)")
+
+set(SEC_SMSC9220_BASE      "0x51400000" CACHE STRING "Ethernet SMSC9220 Base Address (1MB)")
+set(SEC_USB_BASE           "0x51500000" CACHE STRING "USB Base Address (1MB)")
+
+set(SEC_USER_APB0_BASE     "0x51700000" CACHE STRING "User APB0 Base Address")
+set(SEC_USER_APB1_BASE     "0x51701000" CACHE STRING "User APB1 Base Address")
+set(SEC_USER_APB2_BASE     "0x51702000" CACHE STRING "User APB2 Base Address")
+set(SEC_USER_APB3_BASE     "0x51703000" CACHE STRING "User APB3 Base Address")
+
+set(SEC_QSPI_XIP_BASE      "0x51800000" CACHE STRING "QSPI XIP config Base Address ")
+set(SEC_QSPI_WRITE_BASE    "0x51801000" CACHE STRING "QSPI write config Base Address ")
+
+if (ETHOS_U55_ENABLED)
+    set(SEC_ETHOS_U55_BASE     "0x58102000" CACHE STRING "Ethos-U55 base address")
+    set(SEC_ETHOS_U55_TA0_BASE "0x58103000" CACHE STRING "Ethos-U55's timing adapter 0 base address")
+    set(SEC_ETHOS_U55_TA1_BASE "0x58103200" CACHE STRING "Ethos-U55's timing adapter 1 base address")
+endif (ETHOS_U55_ENABLED)
+
+set(SEC_MPS3_I2C0_BASE     "0x58200000" CACHE STRING "Touch Screen I2C Base Address ")
+set(SEC_MPS3_I2C1_BASE     "0x58201000" CACHE STRING "Audio Interface I2C Base Address ")
+set(SEC_MPS3_SSP2_BASE     "0x58202000" CACHE STRING "ADC SPI PL022 Base Address")
+set(SEC_MPS3_SSP3_BASE     "0x58203000" CACHE STRING "Shield 0 SPI PL022 Base Address")
+set(SEC_MPS3_SSP4_BASE     "0x58204000" CACHE STRING "Shield 1 SPI PL022 Base Address")
+set(SEC_MPS3_I2C2_BASE     "0x58205000" CACHE STRING "Shield 0 SBCon Base Address ")
+set(SEC_MPS3_I2C3_BASE     "0x58206000" CACHE STRING "Shield 1 SBCon Base Address ")
+
+set(SEC_USER_APB_BASE      "0x58207000" CACHE STRING "User APB Base Address")
+set(SEC_MPS3_I2C5_BASE     "0x58208000" CACHE STRING "DDR EPROM I2C SBCon Base Address ")
+
+set(SEC_MPS3_SCC_BASE         "0x58300000" CACHE STRING "SCC Base Address ")
+set(SEC_MPS3_AAIC_I2S_BASE    "0x58301000" CACHE STRING "Audio Interface I2S Base Address ")
+set(SEC_MPS3_FPGAIO_BASE      "0x58302000" CACHE STRING "FPGA IO Base Address ")
+
+set(SEC_CMSDK_UART0_BASE      "0x58303000" CACHE STRING "UART 0 Base Address ")
+set(SEC_CMSDK_UART1_BASE      "0x58304000" CACHE STRING "UART 1 Base Address ")
+set(SEC_CMSDK_UART2_BASE      "0x58305000" CACHE STRING "UART 2 Base Address ")
+set(SEC_CMSDK_UART3_BASE      "0x58306000" CACHE STRING "UART 3 Base Address Shield 0")
+set(SEC_CMSDK_UART4_BASE      "0x58307000" CACHE STRING "UART 4 Base Address Shield 1")
+set(SEC_CMSDK_UART5_BASE      "0x58308000" CACHE STRING "UART 5 Base Address ")
+
+set(SEC_CLCD_CONFIG_BASE      "0x5830A000" CACHE STRING "CLCD CONFIG Base Address ")
+set(SEC_RTC_BASE              "0x5830B000" CACHE STRING "RTC Base address ")
+
+
+###################################################################################################
+#                                           MPCs                                                  #
+###################################################################################################
+set(MPC_ISRAM0_BASE_S     "0x50083000" CACHE STRING "ISRAM0 Memory Protection Controller Secure base address")
+set(MPC_ISRAM1_BASE_S     "0x50084000" CACHE STRING "ISRAM1 Memory Protection Controller Secure base address")
+set(MPC_BRAM_BASE_S       "0x57000000" CACHE STRING "SRAM Memory Protection Controller Secure base address")
+set(MPC_QSPI_BASE_S       "0x57001000" CACHE STRING "QSPI Memory Protection Controller Secure base address")
+set(MPC_DDR4_BASE_S       "0x57002000" CACHE STRING "DDR4 Memory Protection Controller Secure base address")
+
+###################################################################################################
+#                                           IRQ numbers                                           #
+###################################################################################################
+set(NONSEC_WATCHDOG_RESET_IRQn    " 0" CACHE STRING " Non-Secure Watchdog Reset Interrupt")
+set(NONSEC_WATCHDOG_IRQn          " 1" CACHE STRING " Non-Secure Watchdog Interrupt         ")
+set(S32K_TIMER_IRQn               " 2" CACHE STRING " S32K Timer Interrupt                  ")
+set(TIMER0_IRQn                   " 3" CACHE STRING " TIMER 0 Interrupt                     ")
+set(TIMER1_IRQn                   " 4" CACHE STRING " TIMER 1 Interrupt                     ")
+set(DUALTIMER_IRQn                " 5" CACHE STRING " Dual Timer Interrupt                  ")
+set(MPC_IRQn                      " 9" CACHE STRING " MPC Combined (Secure) Interrupt       ")
+set(PPC_IRQn                      "10" CACHE STRING " PPC Combined (Secure) Interrupt       ")
+set(MSC_IRQn                      "11" CACHE STRING " MSC Combined (Secure) Interrupt       ")
+set(BRIDGE_ERROR_IRQn             "12" CACHE STRING " Bridge Error Combined (Secure) Interrupt ")
+set(MGMT_PPU_IRQn                 "14" CACHE STRING " MGMT_PPU" )
+set(SYS_PPU_IRQn                  "15" CACHE STRING " SYS_PPU" )
+set(CPU0_PPU_IRQn                 "16" CACHE STRING " CPU0_PPU" )
+set(DEBUG_PPU_IRQn                "26" CACHE STRING " DEBUG_PPU" )
+set(TIMER3_AON_IRQn               "27" CACHE STRING " TIMER3_AON" )
+set(CPU0CTIIQ0_IRQn               "28" CACHE STRING " CPU0CTIIQ0" )
+set(CPU0CTIIQ01_IRQn              "29" CACHE STRING " CPU0CTIIQ01" )
+
+set(SYS_TSTAMP_COUNTER_IRQn       "32" CACHE STRING " System timestamp counter interrupt" )
+set(UARTRX0_IRQn                  "33" CACHE STRING " UART 0 RX Interrupt                   ")
+set(UARTTX0_IRQn                  "34" CACHE STRING " UART 0 TX Interrupt                   ")
+set(UARTRX1_IRQn                  "35" CACHE STRING " UART 1 RX Interrupt                   ")
+set(UARTTX1_IRQn                  "36" CACHE STRING " UART 1 TX Interrupt                   ")
+set(UARTRX2_IRQn                  "37" CACHE STRING " UART 2 RX Interrupt                   ")
+set(UARTTX2_IRQn                  "38" CACHE STRING " UART 2 TX Interrupt                   ")
+set(UARTRX3_IRQn                  "39" CACHE STRING " UART 3 RX Interrupt                   ")
+set(UARTTX3_IRQn                  "40" CACHE STRING " UART 3 TX Interrupt                   ")
+set(UARTRX4_IRQn                  "41" CACHE STRING " UART 4 RX Interrupt                   ")
+set(UARTTX4_IRQn                  "42" CACHE STRING " UART 4 TX Interrupt                   ")
+set(UART0_IRQn                    "43" CACHE STRING " UART 0 combined Interrupt             ")
+set(UART1_IRQn                    "44" CACHE STRING " UART 1 combined Interrupt             ")
+set(UART2_IRQn                    "45" CACHE STRING " UART 2 combined Interrupt             ")
+set(UART3_IRQn                    "46" CACHE STRING " UART 3 combined Interrupt             ")
+set(UART4_IRQn                    "47" CACHE STRING " UART 4 combined Interrupt             ")
+set(UARTOVF_IRQn                  "48" CACHE STRING " UART 0,1,2,3,4 Overflow Interrupt     ")
+set(ETHERNET_IRQn                 "49" CACHE STRING " Ethernet Interrupt                    ")
+set(I2S_IRQn                      "50" CACHE STRING " Audio I2S Interrupt                   ")
+set(TSC_IRQn                      "51" CACHE STRING " Touch Screen Interrupt                ")
+set(USB_IRQn                      "52" CACHE STRING " USB Interrupt                         ")
+set(SPI2_IRQn                     "53" CACHE STRING " ADC (SPI) Interrupt                   ")
+set(SPI3_IRQn                     "54" CACHE STRING " SPI 3 Interrupt (Shield 0)            ")
+set(SPI4_IRQn                     "55" CACHE STRING " SPI 4 Interrupt (Shield 1)            ")
+
+if (ETHOS_U55_ENABLED)
+set(EthosU_IRQn                   "56" CACHE STRING " Ethos-U55 Interrupt                  ")
+endif ()
+
+set(GPIO0_IRQn                    "69" CACHE STRING " GPIO 0 Combined Interrupt             ")
+set(GPIO1_IRQn                    "70" CACHE STRING " GPIO 1 Combined Interrupt             ")
+set(GPIO2_IRQn                    "71" CACHE STRING " GPIO 2 Combined Interrupt             ")
+set(GPIO3_IRQn                    "72" CACHE STRING " GPIO 3 Combined Interrupt             ")
+
+set(GPIO0_0_IRQn                  "73" CACHE STRING "")
+set(GPIO0_1_IRQn                  "74" CACHE STRING "")
+set(GPIO0_2_IRQn                  "75" CACHE STRING "")
+set(GPIO0_3_IRQn                  "76" CACHE STRING "")
+set(GPIO0_4_IRQn                  "77" CACHE STRING "")
+set(GPIO0_5_IRQn                  "78" CACHE STRING "")
+set(GPIO0_6_IRQn                  "79" CACHE STRING "")
+set(GPIO0_7_IRQn                  "80" CACHE STRING "")
+set(GPIO0_8_IRQn                  "81" CACHE STRING "")
+set(GPIO0_9_IRQn                  "82" CACHE STRING "")
+set(GPIO0_10_IRQn                 "83" CACHE STRING "")
+set(GPIO0_11_IRQn                 "84" CACHE STRING "")
+set(GPIO0_12_IRQn                 "85" CACHE STRING "")
+set(GPIO0_13_IRQn                 "86" CACHE STRING "")
+set(GPIO0_14_IRQn                 "87" CACHE STRING "")
+set(GPIO0_15_IRQn                 "88" CACHE STRING "")
+set(GPIO1_0_IRQn                  "89" CACHE STRING "")
+set(GPIO1_1_IRQn                  "90" CACHE STRING "")
+set(GPIO1_2_IRQn                  "91" CACHE STRING "")
+set(GPIO1_3_IRQn                  "92" CACHE STRING "")
+set(GPIO1_4_IRQn                  "93" CACHE STRING "")
+set(GPIO1_5_IRQn                  "94" CACHE STRING "")
+set(GPIO1_6_IRQn                  "95" CACHE STRING "")
+set(GPIO1_7_IRQn                  "96" CACHE STRING "")
+set(GPIO1_8_IRQn                  "97" CACHE STRING "")
+set(GPIO1_9_IRQn                  "98" CACHE STRING "")
+set(GPIO1_10_IRQn                 "99" CACHE STRING "")
+set(GPIO1_11_IRQn                 "100" CACHE STRING "")
+set(GPIO1_12_IRQn                 "101" CACHE STRING "")
+set(GPIO1_13_IRQn                 "102" CACHE STRING "")
+set(GPIO1_14_IRQn                 "103" CACHE STRING "")
+set(GPIO1_15_IRQn                 "104" CACHE STRING "")
+set(GPIO2_0_IRQn                  "105" CACHE STRING "")
+set(GPIO2_1_IRQn                  "106" CACHE STRING "")
+set(GPIO2_2_IRQn                  "107" CACHE STRING "")
+set(GPIO2_3_IRQn                  "108" CACHE STRING "")
+set(GPIO2_4_IRQn                  "109" CACHE STRING "")
+set(GPIO2_5_IRQn                  "110" CACHE STRING "")
+set(GPIO2_6_IRQn                  "111" CACHE STRING "")
+set(GPIO2_7_IRQn                  "112" CACHE STRING "")
+set(GPIO2_8_IRQn                  "113" CACHE STRING "")
+set(GPIO2_9_IRQn                  "114" CACHE STRING "")
+set(GPIO2_10_IRQn                 "115" CACHE STRING "")
+set(GPIO2_11_IRQn                 "116" CACHE STRING "")
+set(GPIO2_12_IRQn                 "117" CACHE STRING "")
+set(GPIO2_13_IRQn                 "118" CACHE STRING "")
+set(GPIO2_14_IRQn                 "119" CACHE STRING "")
+set(GPIO2_15_IRQn                 "120" CACHE STRING "")
+set(GPIO3_0_IRQn                  "121" CACHE STRING "")
+set(GPIO3_1_IRQn                  "122" CACHE STRING "")
+set(GPIO3_2_IRQn                  "123" CACHE STRING "")
+set(GPIO3_3_IRQn                  "124" CACHE STRING "")
+set(UARTRX5_IRQn                  "125" CACHE STRING "UART 5 RX Interrupt")
+set(UARTTX5_IRQn                  "126" CACHE STRING "UART 5 TX Interrupt")
+set(UART5_IRQn                    "127" CACHE STRING "UART 5 combined Interrupt")
diff --git a/scripts/cmake/subsystem-profiles/simple_platform.cmake b/scripts/cmake/subsystem-profiles/simple_platform.cmake
new file mode 100644
index 0000000..c11706d
--- /dev/null
+++ b/scripts/cmake/subsystem-profiles/simple_platform.cmake
@@ -0,0 +1,51 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# CMake configuration file for the peripheral memory map of the simple platform. This is a
+# stripped-down version of the Arm Corstone-300 platform, with the minimal set of peripherals
+# needed to use the Ethos-U55. However, for ease of integration with Arm FastModel Tools, it uses
+# a PL011 as the UART component instead of the CMSDK UART block used by the MPS3 FPGA and FVP
+# implementations.
+
+###################################################################################################
+#                              Application specific config                                        #
+###################################################################################################
+
+# This parameter is based on the linker/scatter script for internal FVP. Do not change this
+# parameter in isolation.
+set(ACTIVATION_BUF_SRAM_SZ "0x00200000"      CACHE STRING "Maximum SRAM size for activation buffers")
+set(DESIGN_NAME            "Simple platform" CACHE STRING "Design name")
+
+###################################################################################################
+#                                         Base addresses                                          #
+###################################################################################################
+set(PL011_UART0_BASE            "0x49303000" CACHE STRING "PL011 UART 0 Base Address")
+
+if (ETHOS_U55_ENABLED)
+    set(ETHOS_U55_BASE          "0x48102000" CACHE STRING "Ethos-U55 base address")
+    set(ETHOS_U55_TA0_BASE      "0x48103000" CACHE STRING "Ethos-U55's timing adapter 0 base address")
+    set(ETHOS_U55_TA1_BASE      "0x48103200" CACHE STRING "Ethos-U55's timing adapter 1 base address")
+    set(SEC_ETHOS_U55_BASE      "0x58102000" CACHE STRING "Ethos-U55 base address")
+    set(SEC_ETHOS_U55_TA0_BASE  "0x58103000" CACHE STRING "Ethos-U55's timing adapter 0 base address")
+    set(SEC_ETHOS_U55_TA1_BASE  "0x58103200" CACHE STRING "Ethos-U55's timing adapter 1 base address")
+endif ()
+
+###################################################################################################
+#                                           IRQ numbers                                           #
+###################################################################################################
+if (ETHOS_U55_ENABLED)
+    set(EthosU_IRQn             "56"         CACHE STRING "Ethos-U55 Interrupt")
+endif ()
diff --git a/scripts/cmake/ta_config.cmake b/scripts/cmake/ta_config.cmake
new file mode 100644
index 0000000..427884c
--- /dev/null
+++ b/scripts/cmake/ta_config.cmake
@@ -0,0 +1,64 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+#----------------------------------------------------------------------------
+# CMake description file for the Ethos-U55 Timing Adapter settings (single
+# NPU core with two AXIs).
+#----------------------------------------------------------------------------
+
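+# All settings below are CMake cache variables, so they can be overridden at configure time
+# without editing this file, for example (values shown are illustrative only):
+#
+#   cmake -DTA0_RLATENCY=64 -DTA0_WLATENCY=64 -DTA1_RLATENCY=128 <source-dir>
+#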
+set(TA0_BASE "${SEC_ETHOS_U55_TA0_BASE}"   CACHE STRING "Timing adapter 0: base-address")
+set(TA1_BASE "${SEC_ETHOS_U55_TA1_BASE}"   CACHE STRING "Timing adapter 1: base-address")
+
+message(STATUS "using TA0_BASE @ ${TA0_BASE}; TA1_BASE @ ${TA1_BASE}.")
+
+# Timing adapter settings for AXI0
+set(TA0_MAXR        "8"        CACHE STRING "6-bit field. Max no. of pending reads. 0=infinite")
+set(TA0_MAXW        "8"        CACHE STRING "6-bit field. Max no. of pending writes. 0=infinite")
+set(TA0_MAXRW       "0"        CACHE STRING "6-bit field. Max no. of pending reads+writes. 0=infinite")
+set(TA0_RLATENCY    "32"       CACHE STRING "12-bit field. Minimum latency (clock cycles) from AVALID to RVALID.")
+set(TA0_WLATENCY    "32"       CACHE STRING "12-bit field. Minimum latency (clock cycles) from WVALID&WLAST to BVALID.")
+set(TA0_PULSE_ON    "3999"     CACHE STRING "No. of cycles addresses let through (0-65535).")
+set(TA0_PULSE_OFF   "1"        CACHE STRING "No. of cycles addresses blocked (0-65535).")
+set(TA0_BWCAP       "4000"     CACHE STRING "16-bit field. Max no. of 64-bit words transferred per pulse cycle. 0=infinite")
+set(TA0_PERFCTRL    "0"        CACHE STRING "6-bit field selecting an event for event counter 0=default")
+set(TA0_PERFCNT     "0"        CACHE STRING "32-bit event counter")
+set(TA0_MODE        "1"        CACHE STRING "Bit 0: 1=enable dynamic clocking to avoid underrun;
+                                             Bit 1: 1=enable random AR reordering (0=default);
+                                             Bit 2: 1=enable random R reordering (0=default);
+                                             Bit 3: 1=enable random B reordering (0=default);
+                                             Bit 11-4: Frequency scale 0=full speed, 255=(1/256) speed")
+set(TA0_HISTBIN     "0"        CACHE STRING "Controls which histogram bin (0-15) should be accessed by HISTCNT.")
+set(TA0_HISTCNT     "0"        CACHE STRING "32-bit field. Read/write the selected histogram bin.")
+
+# Timing adapter settings for AXI1
+set(TA1_MAXR        "2"       CACHE STRING "6-bit field. Max no. of pending reads. 0=infinite")
+set(TA1_MAXW        "0"       CACHE STRING "6-bit field. Max no. of pending writes. 0=infinite")
+set(TA1_MAXRW       "0"       CACHE STRING "6-bit field. Max no. of pending reads+writes. 0=infinite")
+set(TA1_RLATENCY    "64"      CACHE STRING "12-bit field. Minimum latency (clock cycles) from AVALID to RVALID.")
+set(TA1_WLATENCY    "0"       CACHE STRING "12-bit field. Minimum latency (clock cycles) from WVALID&WLAST to BVALID.")
+set(TA1_PULSE_ON    "320"     CACHE STRING "No. of cycles addresses let through (0-65535).")
+set(TA1_PULSE_OFF   "80"      CACHE STRING "No. of cycles addresses blocked (0-65535).")
+set(TA1_BWCAP       "50"      CACHE STRING "16-bit field. Max no. of 64-bit words transferred per pulse cycle. 0=infinite")
+set(TA1_PERFCTRL    "0"       CACHE STRING "6-bit field selecting an event for event counter 0=default")
+set(TA1_PERFCNT     "0"       CACHE STRING "32-bit event counter")
+set(TA1_MODE        "1"       CACHE STRING "Bit 0: 1=enable dynamic clocking to avoid underrun;
+                                            Bit 1: 1=enable random AR reordering (0=default);
+                                            Bit 2: 1=enable random R reordering (0=default);
+                                            Bit 3: 1=enable random B reordering (0=default);
+                                            Bit 11-4: Frequency scale 0=full speed, 255=(1/256) speed")
+set(TA1_HISTBIN     "0"       CACHE STRING "Controls which histogram bin (0-15) should be accessed by HISTCNT.")
+set(TA1_HISTCNT     "0"       CACHE STRING "32-bit field. Read/write the selected histogram bin.")
diff --git a/scripts/cmake/templates/mem_regions.h.template b/scripts/cmake/templates/mem_regions.h.template
new file mode 100644
index 0000000..72978ce
--- /dev/null
+++ b/scripts/cmake/templates/mem_regions.h.template
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Auto-generated file
+// ** DO NOT EDIT **
+
+#ifndef MEM_REGION_DEFS_H
+#define MEM_REGION_DEFS_H
+
+#cmakedefine ITCM_SIZE             (@ITCM_SIZE@)     /* ITCM size */
+#cmakedefine DTCM_BLK_SIZE         (@DTCM_BLK_SIZE@)     /* DTCM size, 4 banks of this size available */
+#cmakedefine BRAM_SIZE             (@BRAM_SIZE@)     /* BRAM size */
+#cmakedefine ISRAM0_SIZE           (@ISRAM0_SIZE@)     /* ISRAM0 size */
+#cmakedefine ISRAM1_SIZE           (@ISRAM1_SIZE@)     /* ISRAM1 size */
+#cmakedefine QSPI_SRAM_SIZE        (@QSPI_SRAM_SIZE@)     /* QSPI Flash size */
+#cmakedefine DDR4_BLK_SIZE         (@DDR4_BLK_SIZE@)     /* DDR4 block size */
+
+#cmakedefine ITCM_BASE_NS          (@ITCM_BASE_NS@)     /* Instruction TCM Non-Secure base address */
+#cmakedefine BRAM_BASE_NS          (@BRAM_BASE_NS@)     /* CODE SRAM Non-Secure base address */
+#cmakedefine DTCM0_BASE_NS         (@DTCM0_BASE_NS@)     /* Data TCM block 0 Non-Secure base address */
+#cmakedefine DTCM1_BASE_NS         (@DTCM1_BASE_NS@)     /* Data TCM block 1 Non-Secure base address */
+#cmakedefine DTCM2_BASE_NS         (@DTCM2_BASE_NS@)     /* Data TCM block 2 Non-Secure base address */
+#cmakedefine DTCM3_BASE_NS         (@DTCM3_BASE_NS@)     /* Data TCM block 3 Non-Secure base address */
+#cmakedefine ISRAM0_BASE_NS        (@ISRAM0_BASE_NS@)     /* Internal SRAM Area Non-Secure base address */
+#cmakedefine ISRAM1_BASE_NS        (@ISRAM1_BASE_NS@)     /* Internal SRAM Area Non-Secure base address */
+#cmakedefine QSPI_SRAM_BASE_NS     (@QSPI_SRAM_BASE_NS@)     /* QSPI SRAM Non-Secure base address */
+#cmakedefine DDR4_BLK0_BASE_NS     (@DDR4_BLK0_BASE_NS@)     /* DDR4 block 0 Non-Secure base address */
+#cmakedefine DDR4_BLK1_BASE_NS     (@DDR4_BLK1_BASE_NS@)     /* DDR4 block 1 Non-Secure base address */
+#cmakedefine DDR4_BLK2_BASE_NS     (@DDR4_BLK2_BASE_NS@)     /* DDR4 block 2 Non-Secure base address */
+#cmakedefine DDR4_BLK3_BASE_NS     (@DDR4_BLK3_BASE_NS@)     /* DDR4 block 3 Non-Secure base address */
+
+#cmakedefine ITCM_BASE_S           (@ITCM_BASE_S@)     /* Instruction TCM Secure base address */
+#cmakedefine BRAM_BASE_S           (@BRAM_BASE_S@)     /* CODE SRAM Secure base address */
+#cmakedefine DTCM0_BASE_S          (@DTCM0_BASE_S@)     /* Data TCM block 0 Secure base address */
+#cmakedefine DTCM1_BASE_S          (@DTCM1_BASE_S@)     /* Data TCM block 1 Secure base address */
+#cmakedefine DTCM2_BASE_S          (@DTCM2_BASE_S@)     /* Data TCM block 2 Secure base address */
+#cmakedefine DTCM3_BASE_S          (@DTCM3_BASE_S@)     /* Data TCM block 3 Secure base address */
+#cmakedefine ISRAM0_BASE_S         (@ISRAM0_BASE_S@)     /* Internal SRAM Area Secure base address */
+#cmakedefine ISRAM1_BASE_S         (@ISRAM1_BASE_S@)     /* Internal SRAM Area Secure base address */
+#cmakedefine DDR4_BLK0_BASE_S      (@DDR4_BLK0_BASE_S@)     /* DDR4 block 0 Secure base address */
+#cmakedefine DDR4_BLK1_BASE_S      (@DDR4_BLK1_BASE_S@)     /* DDR4 block 1 Secure base address */
+#cmakedefine DDR4_BLK2_BASE_S      (@DDR4_BLK2_BASE_S@)     /* DDR4 block 2 Secure base address */
+#cmakedefine DDR4_BLK3_BASE_S      (@DDR4_BLK3_BASE_S@)     /* DDR4 block 3 Secure base address */
+
+#endif /*  MEM_REGION_DEFS_H  */
diff --git a/scripts/cmake/templates/peripheral_irqs.h.template b/scripts/cmake/templates/peripheral_irqs.h.template
new file mode 100644
index 0000000..8e8888b
--- /dev/null
+++ b/scripts/cmake/templates/peripheral_irqs.h.template
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Auto-generated file
+// ** DO NOT EDIT **
+
+#ifndef PERIPHERAL_IRQS_H
+#define PERIPHERAL_IRQS_H
+
+/******************************************************************************/
+/*                    Peripheral interrupt numbers                            */
+/******************************************************************************/
+
+/* -------------------  Cortex-M Processor Exceptions Numbers  -------------- */
+/*                 -14 to -1 should be defined by the system header           */
+/* ----------------------  Core Specific Interrupt Numbers  ------------------*/
+#cmakedefine NONSEC_WATCHDOG_RESET_IRQn (@NONSEC_WATCHDOG_RESET_IRQn@)  /* Non-Secure Watchdog Reset Interrupt   */
+#cmakedefine NONSEC_WATCHDOG_IRQn       (@NONSEC_WATCHDOG_IRQn@)  /* Non-Secure Watchdog Interrupt         */
+#cmakedefine S32K_TIMER_IRQn            (@S32K_TIMER_IRQn@)  /* S32K Timer Interrupt                  */
+#cmakedefine TIMER0_IRQn                (@TIMER0_IRQn@)  /* TIMER 0 Interrupt                     */
+#cmakedefine TIMER1_IRQn                (@TIMER1_IRQn@)  /* TIMER 1 Interrupt                     */
+#cmakedefine DUALTIMER_IRQn             (@DUALTIMER_IRQn@)  /* Dual Timer Interrupt                  */
+#cmakedefine MPC_IRQn                   (@MPC_IRQn@)  /* MPC Combined (@Secure@) Interrupt       */
+#cmakedefine PPC_IRQn                   (@PPC_IRQn@)  /* PPC Combined (@Secure@) Interrupt       */
+#cmakedefine MSC_IRQn                   (@MSC_IRQn@)  /* MSC Combined (@Secure@) Interrupt       */
+#cmakedefine BRIDGE_ERROR_IRQn          (@BRIDGE_ERROR_IRQn@)  /* Bridge Error Combined (@Secure@) Interrupt */
+#cmakedefine MGMT_PPU_IRQn              (@MGMT_PPU_IRQn@)  /* MGMT_PPU */
+#cmakedefine SYS_PPU_IRQn               (@SYS_PPU_IRQn@)  /* SYS_PPU */
+#cmakedefine CPU0_PPU_IRQn              (@CPU0_PPU_IRQn@)  /* CPU0_PPU */
+#cmakedefine DEBUG_PPU_IRQn             (@DEBUG_PPU_IRQn@)  /* DEBUG_PPU */
+#cmakedefine TIMER3_AON_IRQn            (@TIMER3_AON_IRQn@)  /* TIMER3_AON */
+#cmakedefine CPU0CTIIQ0_IRQn            (@CPU0CTIIQ0_IRQn@)  /* CPU0CTIIQ0 */
+#cmakedefine CPU0CTIIQ01_IRQn           (@CPU0CTIIQ01_IRQn@)  /* CPU0CTIIQ01 */
+
+#cmakedefine SYS_TSTAMP_COUNTER_IRQn    (@SYS_TSTAMP_COUNTER_IRQn@)  /* System timestamp counter interrupt */
+
+/* ----------------------  CMSDK Specific Interrupt Numbers  ----------------- */
+#cmakedefine UARTRX0_IRQn               (@UARTRX0_IRQn@)  /* UART 0 RX Interrupt                   */
+#cmakedefine UARTTX0_IRQn               (@UARTTX0_IRQn@)  /* UART 0 TX Interrupt                   */
+#cmakedefine UARTRX1_IRQn               (@UARTRX1_IRQn@)  /* UART 1 RX Interrupt                   */
+#cmakedefine UARTTX1_IRQn               (@UARTTX1_IRQn@)  /* UART 1 TX Interrupt                   */
+#cmakedefine UARTRX2_IRQn               (@UARTRX2_IRQn@)  /* UART 2 RX Interrupt                   */
+#cmakedefine UARTTX2_IRQn               (@UARTTX2_IRQn@)  /* UART 2 TX Interrupt                   */
+#cmakedefine UARTRX3_IRQn               (@UARTRX3_IRQn@)  /* UART 3 RX Interrupt                   */
+#cmakedefine UARTTX3_IRQn               (@UARTTX3_IRQn@)  /* UART 3 TX Interrupt                   */
+#cmakedefine UARTRX4_IRQn               (@UARTRX4_IRQn@)  /* UART 4 RX Interrupt                   */
+#cmakedefine UARTTX4_IRQn               (@UARTTX4_IRQn@)  /* UART 4 TX Interrupt                   */
+#cmakedefine UART0_IRQn                 (@UART0_IRQn@)  /* UART 0 combined Interrupt             */
+#cmakedefine UART1_IRQn                 (@UART1_IRQn@)  /* UART 1 combined Interrupt             */
+#cmakedefine UART2_IRQn                 (@UART2_IRQn@)  /* UART 2 combined Interrupt             */
+#cmakedefine UART3_IRQn                 (@UART3_IRQn@)  /* UART 3 combined Interrupt             */
+#cmakedefine UART4_IRQn                 (@UART4_IRQn@)  /* UART 4 combined Interrupt             */
+#cmakedefine UARTOVF_IRQn               (@UARTOVF_IRQn@)  /* UART 0,1,2,3 and 4 Overflow Interrupt */
+#cmakedefine ETHERNET_IRQn              (@ETHERNET_IRQn@)  /* Ethernet Interrupt                    */
+#cmakedefine I2S_IRQn                   (@I2S_IRQn@)  /* I2S Interrupt                         */
+#cmakedefine TSC_IRQn                   (@TSC_IRQn@)  /* Touch Screen Interrupt                */
+#cmakedefine SPI2_IRQn                  (@SPI2_IRQn@)  /* SPI 2 Interrupt                       */
+#cmakedefine SPI3_IRQn                  (@SPI3_IRQn@)  /* SPI 3 Interrupt                       */
+#cmakedefine SPI4_IRQn                  (@SPI4_IRQn@)  /* SPI 4 Interrupt                       */
+
+#cmakedefine EthosU_IRQn                (@EthosU_IRQn@)   /* Ethos-Uxx Interrupt */
+
+#cmakedefine GPIO0_IRQn                 (@GPIO0_IRQn@)  /* GPIO 0 Combined Interrupt             */
+#cmakedefine GPIO1_IRQn                 (@GPIO1_IRQn@)  /* GPIO 1 Combined Interrupt             */
+#cmakedefine GPIO2_IRQn                 (@GPIO2_IRQn@)  /* GPIO 2 Combined Interrupt             */
+#cmakedefine GPIO3_IRQn                 (@GPIO3_IRQn@)  /* GPIO 3 Combined Interrupt             */
+
+#cmakedefine GPIO0_0_IRQn               (@GPIO0_0_IRQn@)  /* All P0 I/O pins used as irq source    */
+#cmakedefine GPIO0_1_IRQn               (@GPIO0_1_IRQn@)  /* There are 16 pins in total            */
+#cmakedefine GPIO0_2_IRQn               (@GPIO0_2_IRQn@)
+#cmakedefine GPIO0_3_IRQn               (@GPIO0_3_IRQn@)
+#cmakedefine GPIO0_4_IRQn               (@GPIO0_4_IRQn@)
+#cmakedefine GPIO0_5_IRQn               (@GPIO0_5_IRQn@)
+#cmakedefine GPIO0_6_IRQn               (@GPIO0_6_IRQn@)
+#cmakedefine GPIO0_7_IRQn               (@GPIO0_7_IRQn@)
+#cmakedefine GPIO0_8_IRQn               (@GPIO0_8_IRQn@)
+#cmakedefine GPIO0_9_IRQn               (@GPIO0_9_IRQn@)
+#cmakedefine GPIO0_10_IRQn              (@GPIO0_10_IRQn@)
+#cmakedefine GPIO0_11_IRQn              (@GPIO0_11_IRQn@)
+#cmakedefine GPIO0_12_IRQn              (@GPIO0_12_IRQn@)
+#cmakedefine GPIO0_13_IRQn              (@GPIO0_13_IRQn@)
+#cmakedefine GPIO0_14_IRQn              (@GPIO0_14_IRQn@)
+#cmakedefine GPIO0_15_IRQn              (@GPIO0_15_IRQn@)
+#cmakedefine GPIO1_0_IRQn               (@GPIO1_0_IRQn@)  /* All P1 I/O pins used as irq source    */
+#cmakedefine GPIO1_1_IRQn               (@GPIO1_1_IRQn@)  /* There are 16 pins in total            */
+#cmakedefine GPIO1_2_IRQn               (@GPIO1_2_IRQn@)
+#cmakedefine GPIO1_3_IRQn               (@GPIO1_3_IRQn@)
+#cmakedefine GPIO1_4_IRQn               (@GPIO1_4_IRQn@)
+#cmakedefine GPIO1_5_IRQn               (@GPIO1_5_IRQn@)
+#cmakedefine GPIO1_6_IRQn               (@GPIO1_6_IRQn@)
+#cmakedefine GPIO1_7_IRQn               (@GPIO1_7_IRQn@)
+#cmakedefine GPIO1_8_IRQn               (@GPIO1_8_IRQn@)
+#cmakedefine GPIO1_9_IRQn               (@GPIO1_9_IRQn@)
+#cmakedefine GPIO1_10_IRQn              (@GPIO1_10_IRQn@)
+#cmakedefine GPIO1_11_IRQn              (@GPIO1_11_IRQn@)
+#cmakedefine GPIO1_12_IRQn              (@GPIO1_12_IRQn@)
+#cmakedefine GPIO1_13_IRQn              (@GPIO1_13_IRQn@)
+#cmakedefine GPIO1_14_IRQn              (@GPIO1_14_IRQn@)
+#cmakedefine GPIO1_15_IRQn              (@GPIO1_15_IRQn@)
+#cmakedefine GPIO2_0_IRQn               (@GPIO2_0_IRQn@)  /* All P2 I/O pins used as irq source    */
+#cmakedefine GPIO2_1_IRQn               (@GPIO2_1_IRQn@)  /* There are 16 pins in total            */
+#cmakedefine GPIO2_2_IRQn               (@GPIO2_2_IRQn@)
+#cmakedefine GPIO2_3_IRQn               (@GPIO2_3_IRQn@)
+#cmakedefine GPIO2_4_IRQn               (@GPIO2_4_IRQn@)
+#cmakedefine GPIO2_5_IRQn               (@GPIO2_5_IRQn@)
+#cmakedefine GPIO2_6_IRQn               (@GPIO2_6_IRQn@)
+#cmakedefine GPIO2_7_IRQn               (@GPIO2_7_IRQn@)
+#cmakedefine GPIO2_8_IRQn               (@GPIO2_8_IRQn@)
+#cmakedefine GPIO2_9_IRQn               (@GPIO2_9_IRQn@)
+#cmakedefine GPIO2_10_IRQn              (@GPIO2_10_IRQn@)
+#cmakedefine GPIO2_11_IRQn              (@GPIO2_11_IRQn@)
+#cmakedefine GPIO2_12_IRQn              (@GPIO2_12_IRQn@)
+#cmakedefine GPIO2_13_IRQn              (@GPIO2_13_IRQn@)
+#cmakedefine GPIO2_14_IRQn              (@GPIO2_14_IRQn@)
+#cmakedefine GPIO2_15_IRQn              (@GPIO2_15_IRQn@)
+#cmakedefine GPIO3_0_IRQn               (@GPIO3_0_IRQn@)  /* All P3 I/O pins used as irq source    */
+#cmakedefine GPIO3_1_IRQn               (@GPIO3_1_IRQn@)  /* There are 4 pins in total             */
+#cmakedefine GPIO3_2_IRQn               (@GPIO3_2_IRQn@)
+#cmakedefine GPIO3_3_IRQn               (@GPIO3_3_IRQn@)
+#cmakedefine UARTRX5_IRQn               (@UARTRX5_IRQn@)  /* UART 5 RX Interrupt                   */
+#cmakedefine UARTTX5_IRQn               (@UARTTX5_IRQn@)  /* UART 5 TX Interrupt                   */
+#cmakedefine UART5_IRQn                 (@UART5_IRQn@)  /* UART 5 combined Interrupt             */
+#cmakedefine HDCLCD_IRQn                (@HDCLCD_IRQn@)  /* HDCLCD Interrupt                      */
+
+#endif /* PERIPHERAL_IRQS_H */
diff --git a/scripts/cmake/templates/peripheral_memmap.h.template b/scripts/cmake/templates/peripheral_memmap.h.template
new file mode 100644
index 0000000..050d7d7
--- /dev/null
+++ b/scripts/cmake/templates/peripheral_memmap.h.template
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Auto-generated file
+// ** DO NOT EDIT **
+
+#ifndef PERIPHERAL_MEMMAP_H
+#define PERIPHERAL_MEMMAP_H
+
+#cmakedefine DESIGN_NAME              "@DESIGN_NAME@"
+
+/******************************************************************************/
+/*                         Peripheral memory map                              */
+/******************************************************************************/
+
+#cmakedefine CMSDK_GPIO0_BASE         (@CMSDK_GPIO0_BASE@)       /* User GPIO 0 Base Address   */
+#cmakedefine CMSDK_GPIO1_BASE         (@CMSDK_GPIO1_BASE@)       /* User GPIO 1 Base Address   */
+#cmakedefine CMSDK_GPIO2_BASE         (@CMSDK_GPIO2_BASE@)       /* User GPIO 2 Base Address   */
+#cmakedefine CMSDK_GPIO3_BASE         (@CMSDK_GPIO3_BASE@)       /* User GPIO 3 Base Address   */
+
+#cmakedefine AHB_USER0_BASE           (@AHB_USER0_BASE@)       /* AHB USER 0 Base Address (4KB) */
+#cmakedefine AHB_USER1_BASE           (@AHB_USER1_BASE@)       /* AHB USER 1 Base Address (4KB)*/
+#cmakedefine AHB_USER2_BASE           (@AHB_USER2_BASE@)       /* AHB USER 2 Base Address (4KB)*/
+#cmakedefine AHB_USER3_BASE           (@AHB_USER3_BASE@)       /* AHB USER 3 Base Address (4KB)*/
+
+#cmakedefine DMA0_BASE                (@DMA0_BASE@)       /* DMA0 (4KB) */
+#cmakedefine DMA1_BASE                (@DMA1_BASE@)       /* DMA1 (4KB) */
+#cmakedefine DMA2_BASE                (@DMA2_BASE@)       /* DMA2 (4KB) */
+#cmakedefine DMA3_BASE                (@DMA3_BASE@)       /* DMA3 (4KB) */
+
+#cmakedefine USER_APB0_BASE           (@USER_APB0_BASE@)       /* User APB0 */
+#cmakedefine USER_APB1_BASE           (@USER_APB1_BASE@)       /* User APB1 */
+#cmakedefine USER_APB2_BASE           (@USER_APB2_BASE@)       /* User APB2 */
+#cmakedefine USER_APB3_BASE           (@USER_APB3_BASE@)       /* User APB3 */
+
+#cmakedefine MPS3_I2C0_BASE           (@MPS3_I2C0_BASE@)       /* Touch Screen I2C Base Address */
+#cmakedefine MPS3_I2C1_BASE           (@MPS3_I2C1_BASE@)       /* Audio Interface I2C Base Address */
+#cmakedefine MPS3_SSP2_BASE           (@MPS3_SSP2_BASE@)       /* ADC SPI PL022 Base Address   */
+#cmakedefine MPS3_SSP3_BASE           (@MPS3_SSP3_BASE@)       /* Shield 0 SPI PL022 Base Address   */
+
+#cmakedefine MPS3_SSP4_BASE           (@MPS3_SSP4_BASE@)       /* Shield 1 SPI PL022 Base Address   */
+#cmakedefine MPS3_I2C2_BASE           (@MPS3_I2C2_BASE@)       /* Shield 0 SBCon Base Address */
+#cmakedefine MPS3_I2C3_BASE           (@MPS3_I2C3_BASE@)       /* Shield 1 SBCon Base Address */
+
+#cmakedefine USER_APB_BASE            (@USER_APB_BASE@)       /* User APB Base Address */
+#cmakedefine MPS3_I2C4_BASE           (@MPS3_I2C4_BASE@)       /* HDMI I2C SBCon Base Address */
+#cmakedefine MPS3_I2C5_BASE           (@MPS3_I2C5_BASE@)       /* DDR EPROM I2C SBCon Base Address */
+#cmakedefine MPS3_SCC_BASE            (@MPS3_SCC_BASE@)       /* SCC Base Address    */
+#cmakedefine MPS3_AAIC_I2S_BASE       (@MPS3_AAIC_I2S_BASE@)       /* Audio Interface I2S Base Address */
+#cmakedefine MPS3_FPGAIO_BASE         (@MPS3_FPGAIO_BASE@)       /* FPGA IO Base Address */
+#cmakedefine PL011_UART0_BASE         (@PL011_UART0_BASE@)       /* PL011 UART0 Base Address */
+#cmakedefine CMSDK_UART0_BASE         (@CMSDK_UART0_BASE@)       /* UART 0 Base Address */
+#cmakedefine CMSDK_UART1_BASE         (@CMSDK_UART1_BASE@)       /* UART 1 Base Address */
+#cmakedefine CMSDK_UART2_BASE         (@CMSDK_UART2_BASE@)       /* UART 2 Base Address */
+#cmakedefine CMSDK_UART3_BASE         (@CMSDK_UART3_BASE@)       /* UART 3 Base Address Shield 0*/
+
+#cmakedefine ETHOS_U55_BASE           (@ETHOS_U55_BASE@)    /* Ethos-U55 base address*/
+#cmakedefine ETHOS_U55_TA0_BASE       (@ETHOS_U55_TA0_BASE@)    /* Ethos-U55's timing adapter 0 base address */
+#cmakedefine ETHOS_U55_TA1_BASE       (@ETHOS_U55_TA1_BASE@)    /* Ethos-U55's timing adapter 1 base address */
+
+#cmakedefine CMSDK_UART4_BASE         (@CMSDK_UART4_BASE@)       /* UART 4 Base Address Shield 1*/
+#cmakedefine CMSDK_UART5_BASE         (@CMSDK_UART5_BASE@)       /* UART 5 Base Address */
+#cmakedefine HDMI_AUDIO_BASE          (@HDMI_AUDIO_BASE@)       /* HDMI AUDIO Base Address */
+#cmakedefine CLCD_CONFIG_BASE         (@CLCD_CONFIG_BASE@)       /* CLCD CONFIG Base Address */
+#cmakedefine RTC_BASE                 (@RTC_BASE@)       /* RTC Base address */
+#cmakedefine SMSC9220_BASE            (@SMSC9220_BASE@)       /* Ethernet SMSC9220 Base Address */
+#cmakedefine USB_BASE                 (@USB_BASE@)       /* USB Base Address */
+#cmakedefine CMSDK_SDIO_BASE          (@CMSDK_SDIO_BASE@)       /* User SDIO Base Address   */
+#cmakedefine MPS3_CLCD_BASE           (@MPS3_CLCD_BASE@)       /* HDLCD Base Address   */
+#cmakedefine MPS3_eMMC_BASE           (@MPS3_eMMC_BASE@)       /* User eMMC Base Address   */
+#cmakedefine USER_BASE                (@USER_BASE@)       /* User ? Base Address */
+
+#cmakedefine QSPI_XIP_BASE            (@QSPI_XIP_BASE@)       /* QSPI XIP config Base Address */
+#cmakedefine QSPI_WRITE_BASE          (@QSPI_WRITE_BASE@)       /* QSPI write config Base Address */
+
+/******************************************************************************/
+/*                      Secure Peripheral memory map                          */
+/******************************************************************************/
+
+#cmakedefine MPC_ISRAM0_BASE_S        (@MPC_ISRAM0_BASE_S@)       /* ISRAM0 Memory Protection Controller Secure base address */
+#cmakedefine MPC_ISRAM1_BASE_S        (@MPC_ISRAM1_BASE_S@)       /* ISRAM1 Memory Protection Controller Secure base address */
+
+#cmakedefine SEC_CMSDK_GPIO0_BASE     (@SEC_CMSDK_GPIO0_BASE@)       /* User GPIO 0 Base Address   */
+#cmakedefine SEC_CMSDK_GPIO1_BASE     (@SEC_CMSDK_GPIO1_BASE@)       /* User GPIO 0 Base Address   */
+#cmakedefine SEC_CMSDK_GPIO2_BASE     (@SEC_CMSDK_GPIO2_BASE@)       /* User GPIO 0 Base Address   */
+#cmakedefine SEC_CMSDK_GPIO3_BASE     (@SEC_CMSDK_GPIO3_BASE@)       /* User GPIO 0 Base Address   */
+
+#cmakedefine SEC_AHB_USER0_BASE       (@SEC_AHB_USER0_BASE@)       /* AHB USER 0 Base Address (4KB) */
+#cmakedefine SEC_AHB_USER1_BASE       (@SEC_AHB_USER1_BASE@)       /* AHB USER 1 Base Address (4KB)*/
+#cmakedefine SEC_AHB_USER2_BASE       (@SEC_AHB_USER2_BASE@)       /* AHB USER 2 Base Address (4KB)*/
+#cmakedefine SEC_AHB_USER3_BASE       (@SEC_AHB_USER3_BASE@)       /* AHB USER 3 Base Address (4KB)*/
+
+#cmakedefine SEC_DMA0_BASE            (@SEC_DMA0_BASE@)       /* DMA0 (4KB) */
+#cmakedefine SEC_DMA1_BASE            (@SEC_DMA1_BASE@)       /* DMA1 (4KB) */
+#cmakedefine SEC_DMA2_BASE            (@SEC_DMA2_BASE@)       /* DMA2 (4KB) */
+#cmakedefine SEC_DMA3_BASE            (@SEC_DMA3_BASE@)       /* DMA3 (4KB) */
+
+#cmakedefine SEC_USER_APB0_BASE       (@SEC_USER_APB0_BASE@)       /* User APB0 */
+#cmakedefine SEC_USER_APB1_BASE       (@SEC_USER_APB1_BASE@)       /* User APB1 */
+#cmakedefine SEC_USER_APB2_BASE       (@SEC_USER_APB2_BASE@)       /* User APB2 */
+#cmakedefine SEC_USER_APB3_BASE       (@SEC_USER_APB3_BASE@)       /* User APB3 */
+
+#cmakedefine SEC_MPS3_I2C0_BASE       (@SEC_MPS3_I2C0_BASE@)       /* Touch Screen I2C Base Address */
+#cmakedefine SEC_MPS3_I2C1_BASE       (@SEC_MPS3_I2C1_BASE@)       /* Audio Interface I2C Base Address */
+#cmakedefine SEC_MPS3_SSP2_BASE       (@SEC_MPS3_SSP2_BASE@)       /* ADC SPI PL022 Base Address   */
+#cmakedefine SEC_MPS3_SSP3_BASE       (@SEC_MPS3_SSP3_BASE@)       /* Shield 0 SPI PL022 Base Address   */
+
+#cmakedefine SEC_MPS3_SSP4_BASE       (@SEC_MPS3_SSP4_BASE@)       /* Shield 1 SPI PL022 Base Address   */
+#cmakedefine SEC_MPS3_I2C2_BASE       (@SEC_MPS3_I2C2_BASE@)       /* Shield 0 SBCon Base Address */
+#cmakedefine SEC_MPS3_I2C3_BASE       (@SEC_MPS3_I2C3_BASE@)       /* Shield 1 SBCon Base Address */
+
+#cmakedefine SEC_MPS3_I2C4_BASE       (@SEC_MPS3_I2C4_BASE@)       /* HDMI I2C SBCon Base Address */
+#cmakedefine SEC_MPS3_I2C5_BASE       (@SEC_MPS3_I2C5_BASE@)       /* DDR EPROM I2C SBCon Base Address */
+#cmakedefine SEC_MPS3_SCC_BASE        (@SEC_MPS3_SCC_BASE@)       /* SCC Base Address    */
+#cmakedefine SEC_MPS3_AAIC_I2S_BASE   (@SEC_MPS3_AAIC_I2S_BASE@)       /* Audio Interface I2S Base Address */
+#cmakedefine SEC_MPS3_FPGAIO_BASE     (@SEC_MPS3_FPGAIO_BASE@)       /* FPGA IO Base Address */
+#cmakedefine SEC_CMSDK_UART0_BASE     (@SEC_CMSDK_UART0_BASE@)       /* UART 0 Base Address */
+#cmakedefine SEC_CMSDK_UART1_BASE     (@SEC_CMSDK_UART1_BASE@)       /* UART 1 Base Address */
+#cmakedefine SEC_CMSDK_UART2_BASE     (@SEC_CMSDK_UART2_BASE@)       /* UART 2 Base Address */
+#cmakedefine SEC_CMSDK_UART3_BASE     (@SEC_CMSDK_UART3_BASE@)       /* UART 3 Base Address Shield 0*/
+
+#cmakedefine SEC_CMSDK_UART4_BASE     (@SEC_CMSDK_UART4_BASE@)       /* UART 4 Base Address Shield 1*/
+#cmakedefine SEC_CMSDK_UART5_BASE     (@SEC_CMSDK_UART5_BASE@)       /* UART 5 Base Address */
+#cmakedefine SEC_HDMI_AUDIO_BASE      (@SEC_HDMI_AUDIO_BASE@)       /* HDMI AUDIO Base Address */
+#cmakedefine SEC_CLCD_CONFIG_BASE     (@SEC_CLCD_CONFIG_BASE@)       /* CLCD CONFIG Base Address */
+#cmakedefine SEC_RTC_BASE             (@SEC_RTC_BASE@)       /* RTC Base address */
+#cmakedefine SEC_SMSC9220_BASE        (@SEC_SMSC9220_BASE@)       /* Ethernet SMSC9220 Base Address */
+#cmakedefine SEC_USB_BASE             (@SEC_USB_BASE@)       /* USB Base Address */
+
+#cmakedefine SEC_ETHOS_U55_BASE       (@SEC_ETHOS_U55_BASE@)   /* Ethos-U55 base address*/
+#cmakedefine SEC_ETHOS_U55_TA0_BASE   (@SEC_ETHOS_U55_TA0_BASE@)   /* Ethos-U55's timing adapter 0 base address */
+#cmakedefine SEC_ETHOS_U55_TA1_BASE   (@SEC_ETHOS_U55_TA1_BASE@)   /* Ethos-U55's timing adapter 1 base address */
+
+#cmakedefine SEC_USER_BASE            (@SEC_USER_BASE@)       /* User ? Base Address */
+
+#cmakedefine SEC_QSPI_XIP_BASE        (@SEC_QSPI_XIP_BASE@)       /* QSPI XIP config Base Address */
+#cmakedefine SEC_QSPI_WRITE_BASE      (@SEC_QSPI_WRITE_BASE@)       /* QSPI write config Base Address */
+
+/******************************************************************************/
+/*                                  MPCs                                      */
+/******************************************************************************/
+
+#cmakedefine MPC_ISRAM0_BASE_S        (@MPC_ISRAM0_BASE_S@)       /* Internal SRAM 0 MPC */
+#cmakedefine MPC_ISRAM1_BASE_S        (@MPC_ISRAM1_BASE_S@)       /* Internal SRAM 1 MPC */
+#cmakedefine MPC_BRAM_BASE_S          (@MPC_BRAM_BASE_S@)       /* SRAM Memory Protection Controller Secure base address */
+#cmakedefine MPC_QSPI_BASE_S          (@MPC_QSPI_BASE_S@)       /* QSPI Memory Protection Controller Secure base address */
+#cmakedefine MPC_DDR4_BASE_S          (@MPC_DDR4_BASE_S@)       /* DDR4 Memory Protection Controller Secure base address */
+
+#endif /* PERIPHERAL_MEMMAP_H */
diff --git a/scripts/cmake/templates/timing_adapter_settings.template b/scripts/cmake/templates/timing_adapter_settings.template
new file mode 100644
index 0000000..d5e202a
--- /dev/null
+++ b/scripts/cmake/templates/timing_adapter_settings.template
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Auto-generated file
+// ** DO NOT EDIT **
+
+#ifndef TIMING_ADAPTER_SETTINGS_H
+#define TIMING_ADAPTER_SETTINGS_H
+
+#cmakedefine TA0_BASE       (@TA0_BASE@)
+#cmakedefine TA1_BASE       (@TA1_BASE@)
+
+/* Timing adapter settings for AXI0 */
+#if defined(TA0_BASE)
+
+#define TA0_MAXR           (@TA0_MAXR@)
+#define TA0_MAXW           (@TA0_MAXW@)
+#define TA0_MAXRW          (@TA0_MAXRW@)
+#define TA0_RLATENCY       (@TA0_RLATENCY@)
+#define TA0_WLATENCY       (@TA0_WLATENCY@)
+#define TA0_PULSE_ON       (@TA0_PULSE_ON@)
+#define TA0_PULSE_OFF      (@TA0_PULSE_OFF@)
+#define TA0_BWCAP          (@TA0_BWCAP@)
+#define TA0_PERFCTRL       (@TA0_PERFCTRL@)
+#define TA0_PERFCNT        (@TA0_PERFCNT@)
+#define TA0_MODE           (@TA0_MODE@)
+#define TA0_HISTBIN        (@TA0_HISTBIN@)
+#define TA0_HISTCNT        (@TA0_HISTCNT@)
+
+#endif /* defined(TA0_BASE) */
+
+/* Timing adapter settings for AXI1 */
+#if defined(TA1_BASE)
+
+#define TA1_MAXR           (@TA1_MAXR@)
+#define TA1_MAXW           (@TA1_MAXW@)
+#define TA1_MAXRW          (@TA1_MAXRW@)
+#define TA1_RLATENCY       (@TA1_RLATENCY@)
+#define TA1_WLATENCY       (@TA1_WLATENCY@)
+#define TA1_PULSE_ON       (@TA1_PULSE_ON@)
+#define TA1_PULSE_OFF      (@TA1_PULSE_OFF@)
+#define TA1_BWCAP          (@TA1_BWCAP@)
+#define TA1_PERFCTRL       (@TA1_PERFCTRL@)
+#define TA1_PERFCNT        (@TA1_PERFCNT@)
+#define TA1_MODE           (@TA1_MODE@)
+#define TA1_HISTBIN        (@TA1_HISTBIN@)
+#define TA1_HISTCNT        (@TA1_HISTCNT@)
+
+#endif /* defined(TA1_BASE) */
+
+#endif /* TIMING_ADAPTER_SETTINGS_H */
\ No newline at end of file
diff --git a/scripts/cmake/tensorflow.cmake b/scripts/cmake/tensorflow.cmake
new file mode 100644
index 0000000..1123c7f
--- /dev/null
+++ b/scripts/cmake/tensorflow.cmake
@@ -0,0 +1,130 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+include(ProcessorCount)
+ProcessorCount(J)
+
+if (CMAKE_BUILD_TYPE STREQUAL Debug)
+    set(TENSORFLOW_LITE_MICRO_DEFAULT_BUILD_TYPE "debug")
+    set(TENSORFLOW_LITE_MICRO_OPTIMIZATION_LEVEL "-O0")
+elseif (CMAKE_BUILD_TYPE STREQUAL Release)
+    set(TENSORFLOW_LITE_MICRO_DEFAULT_BUILD_TYPE "release")
+    set(TENSORFLOW_LITE_MICRO_OPTIMIZATION_LEVEL "-O3")
+elseif(CMAKE_BUILD_TYPE STREQUAL RelWithDebInfo)
+    set(TENSORFLOW_LITE_MICRO_DEFAULT_BUILD_TYPE "release_with_logs")
+    # No override for optimisation level; we rely on the default
+    # optimisation applied by the TensorFlow Lite Micro build here.
+elseif (NOT DEFINED TENSORFLOW_LITE_MICRO_BUILD_TYPE)
+    message(WARNING     "TENSORFLOW_LITE_MICRO_BUILD_TYPE is not set.")
+    message(FATAL_ERROR "Build type ${CMAKE_BUILD_TYPE} does not have a corresponding "
+                        "default to set TensorFlow build type")
+endif()
+
+USER_OPTION(TENSORFLOW_LITE_MICRO_BUILD_TYPE "TensorFlow Lite Micro build type (release/debug etc.)"
+    ${TENSORFLOW_LITE_MICRO_DEFAULT_BUILD_TYPE}
+    STRING)
+
+USER_OPTION(TENSORFLOW_LITE_MICRO_CLEAN_DOWNLOADS "Select if TPIP downloads should be cleaned before each build."
+    OFF
+    BOOL)
+
+USER_OPTION(TENSORFLOW_LITE_MICRO_CLEAN_BUILD "Select if the clean target should be added to the list of build targets"
+    ON
+    BOOL)
+
+if (CMAKE_CXX_COMPILER_ID STREQUAL "ARMClang")
+    set(TENSORFLOW_LITE_MICRO_TOOLCHAIN "armclang")
+elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+    set(TENSORFLOW_LITE_MICRO_TOOLCHAIN "gcc")
+else ()
+    message(FATAL_ERROR "No compiler ID is set")
+endif()
+
+get_filename_component(TENSORFLOW_LITE_MICRO_TARGET_TOOLCHAIN_ROOT ${CMAKE_C_COMPILER} DIRECTORY)
+set(TENSORFLOW_LITE_MICRO_TARGET_TOOLCHAIN_ROOT "${TENSORFLOW_LITE_MICRO_TARGET_TOOLCHAIN_ROOT}/")
+
+set(TENSORFLOW_LITE_MICRO_PATH "${TENSORFLOW_SRC_PATH}/tensorflow/lite/micro")
+set(TENSORFLOW_LITE_MICRO_GENDIR ${CMAKE_CURRENT_BINARY_DIR}/tensorflow/)
+
+set(CMSIS_DSP_MAKEFILE_INC ${CMAKE_CURRENT_SOURCE_DIR}/scripts/make/cmsis_dsp.inc)
+set(ETHOS_EVAL_TARGET_MAKEFILE_INC ${CMAKE_CURRENT_SOURCE_DIR}/scripts/make/cortex_m_ethos_eval_makefile.inc)
+
+if (TARGET_PLATFORM STREQUAL native)
+    set(TENSORFLOW_LITE_MICRO_TARGET "linux")
+    set(TENSORFLOW_LITE_MICRO_TARGET_ARCH x86_64)
+else()
+    set(TENSORFLOW_LITE_MICRO_TARGET "cortex_m_ethos_eval")
+    set(TENSORFLOW_LITE_MICRO_TARGET_ARCH ${CMAKE_SYSTEM_PROCESSOR}${CPU_FEATURES})
+    if(ETHOS_U55_ENABLED)
+        # Arm Ethos-U55 NPU is the co-processor for ML workloads:
+        set(TENSORFLOW_LITE_MICRO_CO_PROCESSOR  "ethos_u")
+    endif()
+
+    set(TENSORFLOW_LITE_MICRO_OPTIMIZED_KERNEL  "cmsis_nn")
+
+    # Copy over the target helper (cortex_m_ethos_eval)
+    file(COPY ${ETHOS_EVAL_TARGET_MAKEFILE_INC}
+        DESTINATION ${TENSORFLOW_LITE_MICRO_PATH}/tools/make/targets/)
+endif()
+
+if (TENSORFLOW_LITE_MICRO_CLEAN_DOWNLOADS)
+    list(APPEND MAKE_TARGETS_LIST "clean_downloads")
+endif()
+
+if (TENSORFLOW_LITE_MICRO_CLEAN_BUILD)
+    list(APPEND MAKE_TARGETS_LIST "clean")
+endif()
+
+# Primary target
+list(APPEND MAKE_TARGETS_LIST "microlite")
+message(STATUS "TensorFlow Lite Micro build to be called for these targets: ${MAKE_TARGETS_LIST}")
+
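+# The custom target below delegates the build to the TensorFlow Lite Micro Makefile. The
+# resulting invocation is roughly equivalent to (values shown are illustrative only):
+#
+#   make -j8 -f <tensorflow>/tensorflow/lite/micro/tools/make/Makefile microlite \
+#        TOOLCHAIN=gcc TARGET=cortex_m_ethos_eval TARGET_ARCH=cortex-m55 \
+#        BUILD_TYPE=release OPTIMIZED_KERNEL_DIR=cmsis_nn CO_PROCESSOR=ethos_u
+#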
+# Commands and targets
+add_custom_target(tensorflow_build ALL
+
+    # Command to build the TensorFlow Lite Micro library
+    COMMAND make -j${J} -f ${TENSORFLOW_LITE_MICRO_PATH}/tools/make/Makefile ${MAKE_TARGETS_LIST}
+        TARGET_TOOLCHAIN_ROOT=${TENSORFLOW_LITE_MICRO_TARGET_TOOLCHAIN_ROOT}
+        TOOLCHAIN=${TENSORFLOW_LITE_MICRO_TOOLCHAIN}
+        GENDIR=${TENSORFLOW_LITE_MICRO_GENDIR}
+        TARGET=${TENSORFLOW_LITE_MICRO_TARGET}
+        TARGET_ARCH=${TENSORFLOW_LITE_MICRO_TARGET_ARCH}
+        BUILD_TYPE=${TENSORFLOW_LITE_MICRO_BUILD_TYPE}
+        ETHOSU_DRIVER_PATH=${ETHOS_U55_DRIVER_SRC_PATH}
+        CMSIS_PATH=${CMSIS_SRC_PATH}
+
+        # Conditional arguments
+        $<$<BOOL:${ARMCLANG_DEBUG_DWARF_LEVEL}>:ARMCLANG_DEBUG_DWARF_LEVEL=${ARMCLANG_DEBUG_DWARF_LEVEL}>
+        $<$<BOOL:${TENSORFLOW_LITE_MICRO_OPTIMIZATION_LEVEL}>:OPTIMIZATION_LEVEL=${TENSORFLOW_LITE_MICRO_OPTIMIZATION_LEVEL}>
+        $<$<BOOL:${TENSORFLOW_LITE_MICRO_OPTIMIZED_KERNEL}>:OPTIMIZED_KERNEL_DIR=${TENSORFLOW_LITE_MICRO_OPTIMIZED_KERNEL}>
+        $<$<BOOL:${TENSORFLOW_LITE_MICRO_CO_PROCESSOR}>:CO_PROCESSOR=${TENSORFLOW_LITE_MICRO_CO_PROCESSOR}>
+
+    # Command to copy over the generated library to the local build tree.
+    COMMAND ${CMAKE_COMMAND} -E copy  ${TENSORFLOW_LITE_MICRO_GENDIR}/lib/${TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME}
+            ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME}
+
+    COMMENT "Building TensorFlow Lite Micro library..."
+
+    BYPRODUCTS ${TENSORFLOW_SRC_PATH}/tensorflow/lite/micro/tools/make/downloads
+                ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME}
+                ${TENSORFLOW_LITE_MICRO_GENDIR}/lib/${TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME}
+
+    WORKING_DIRECTORY ${TENSORFLOW_SRC_PATH})
+
+# Create library
+add_library(tensorflow-lite-micro STATIC IMPORTED)
+add_dependencies(tensorflow-lite-micro tensorflow_build)
diff --git a/scripts/cmake/util_functions.cmake b/scripts/cmake/util_functions.cmake
new file mode 100644
index 0000000..6d76131
--- /dev/null
+++ b/scripts/cmake/util_functions.cmake
@@ -0,0 +1,143 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+##############################################################################
+# Helper function to provide user option and corresponding default value
+##############################################################################
+function(USER_OPTION name description default type)
+
+    if (NOT DEFINED ${name})
+        set(${name} ${default} CACHE ${type} ${description})
+    endif()
+
+    # if it is a path
+    if (${type} STREQUAL PATH)
+
+        # Get the absolute path, relative to the cmake root
+        get_filename_component(ABSPATH "${${name}}" ABSOLUTE BASE_DIR ${CMAKE_SOURCE_DIR})
+
+        # check that this is a directory
+        if (NOT IS_DIRECTORY ${ABSPATH})
+            message(FATAL_ERROR
+                "Invalid directory path. Description: ${description}; Path: ${ABSPATH}")
+        endif()
+
+        set(${name} ${ABSPATH} CACHE ${type} ${description} FORCE)
+
+    # if this is a file path
+    elseif(${type} STREQUAL FILEPATH)
+
+        # Get the absolute path, relative to the cmake root
+        get_filename_component(ABSPATH "${${name}}" ABSOLUTE BASE_DIR ${CMAKE_SOURCE_DIR})
+
+        # check that the file exists:
+        if (NOT EXISTS ${ABSPATH})
+            message(FATAL_ERROR
+                "Invalid file path. Description: ${description}; Path: ${ABSPATH}")
+        endif()
+
+        set(${name} ${ABSPATH} CACHE ${type} ${description} FORCE)
+
+    endif()
+
+    message(STATUS "User option ${name} is set to ${${name}}")
+    LIST(APPEND USER_OPTIONS ${name})
+    set(USER_OPTIONS ${USER_OPTIONS} CACHE INTERNAL "")
+
+endfunction()
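+
+# Example usage of the helper above (names and values are illustrative only):
+#
+#   USER_OPTION(LOG_LEVEL "Logging level for the application" LOG_LEVEL_INFO STRING)
+#   USER_OPTION(TENSORFLOW_SRC_PATH "Path to the TensorFlow source tree" <path-to-tensorflow> PATH)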
+
+# Function to get the path type for a variable
+# Args:
+#   path_var[in]:           path variable for which the cmake path type is requested
+#   cmake_path_type[out]:   CMake path type. Set to FILEPATH when it is a file
+#                           or PATH when it points to a directory. If the path
+#                           is invalid, this remains empty.
+function(get_path_type path_var cmake_path_type)
+    # Validate path - get absolute value
+    get_filename_component(ABSPATH "${path_var}" ABSOLUTE
+                           BASE_DIR ${CMAKE_SOURCE_DIR})
+
+    if (DEFINED path_var)
+        if (IS_DIRECTORY ${ABSPATH})
+            set(${cmake_path_type} PATH PARENT_SCOPE)
+            message(STATUS "Variable of PATH type")
+        elseif(EXISTS ${ABSPATH})
+            set(${cmake_path_type} FILEPATH PARENT_SCOPE)
+        else()
+            set(${cmake_path_type} "" PARENT_SCOPE)
+        endif()
+    else()
+        set(${cmake_path_type} UNINITIALIZED PARENT_SCOPE)
+    endif()
+
+endfunction()
+
+# Function to print all the user options added using the function `USER_OPTION`
+function(print_useroptions)
+    message(STATUS "--------------------------------------------------------------------------------------------------")
+    message(STATUS "Defined build user options:")
+    message(STATUS "")
+    foreach(opt ${USER_OPTIONS})
+        message(STATUS "    ${opt}=${${opt}}")
+    endforeach()
+    message(STATUS "--------------------------------------------------------------------------------------------------")
+endfunction()
+
+function (SUBDIRLIST result curdir)
+    file(GLOB children RELATIVE ${curdir} ${curdir}/*)
+    set(dirlist "")
+    foreach(child ${children})
+        if(IS_DIRECTORY ${curdir}/${child})
+            LIST(APPEND dirlist ${child})
+        endif()
+    endforeach()
+    set(${result} ${dirlist} PARENT_SCOPE)
+endfunction()
+
+function(to_py_bool cmake_bool py_bool)
+    if(${${cmake_bool}})
+        set(${py_bool} True PARENT_SCOPE)
+    else()
+        set(${py_bool} False PARENT_SCOPE)
+    endif()
+endfunction()
+
+# Function to download a file from the Arm Model Zoo
+# Arguments:
+#   file_sub_path: sub-path within the Model Zoo repository
+#   download_path: location where this file is to be downloaded (path including filename)
+function(download_file_from_modelzoo file_sub_path download_path)
+
+    set(MODEL_ZOO_REPO      "https://github.com/ARM-software/ML-zoo/raw")
+    set(MODEL_ZOO_VERSION   "68b5fbc77ed28e67b2efc915997ea4477c1d9d5b")
+
+    string(JOIN "/" FILE_URL
+        ${MODEL_ZOO_REPO} ${MODEL_ZOO_VERSION} ${file_sub_path})
+
+    message(STATUS "Downloading ${FILE_URL} to ${download_path}...")
+
+    file(DOWNLOAD ${FILE_URL} ${download_path}
+        STATUS DOWNLOAD_STATE)
+    list(GET DOWNLOAD_STATE 0 RET_VAL)
+
+    if(${RET_VAL})
+        list(GET DOWNLOAD_STATE 1 RET_MSG)
+        message(FATAL_ERROR "Download failed with error code: ${RET_VAL}; "
+                            "Error message: ${RET_MSG}")
+    endif()
+
+endfunction()
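+
+# Example usage (the sub-path below is illustrative only; real paths are set by each use case):
+#
+#   download_file_from_modelzoo(
+#       "models/keyword_spotting/ds_cnn_large/tflite_int8/ds_cnn_l_quantized.tflite"
+#       "${CMAKE_BINARY_DIR}/models/kws/ds_cnn_l_quantized.tflite")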
diff --git a/scripts/make/cortex_m_ethos_eval_makefile.inc b/scripts/make/cortex_m_ethos_eval_makefile.inc
new file mode 100644
index 0000000..dbb460d
--- /dev/null
+++ b/scripts/make/cortex_m_ethos_eval_makefile.inc
@@ -0,0 +1,153 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# Generic Makefile target for Arm Cortex-M builds.
+# For more info see: tensorflow/lite/micro/cortex_m_generic/README.md
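+# This file is copied into the TensorFlow Lite Micro tree and is selected by passing, for
+# example (illustrative values): TARGET=cortex_m_ethos_eval TARGET_ARCH=cortex-m55 TOOLCHAIN=armclang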
+ifeq ($(TARGET),$(filter $(TARGET), cortex_m_ethos_eval))
+  FLOAT := soft
+  GCC_TARGET_ARCH := $(TARGET_ARCH)
+
+  ifeq ($(TARGET_ARCH), cortex-m0)
+    CORE=M0
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M0
+
+  else ifeq ($(TARGET_ARCH), cortex-m3)
+    CORE=M3
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M3
+
+  else ifeq ($(TARGET_ARCH), cortex-m33)
+    CORE=M33
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M33
+    TARGET_SPECIFIC_FLAGS += -D__DSP_PRESENT=1 -D__FPU_PRESENT=1 -D__VTOR_PRESENT=1 -D__FPU_USED=1
+    FLOAT=hard
+
+  else ifeq ($(TARGET_ARCH), cortex-m33+nodsp)
+    CORE=M33
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M33.no_dsp.no_fp
+
+  else ifeq ($(TARGET_ARCH), cortex-m4)
+    CORE=M4
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M4.no_fp
+    GCC_TARGET_ARCH := cortex-m4+nofp
+
+  else ifeq ($(TARGET_ARCH), cortex-m4+fp)
+    CORE=M4
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M4
+    TARGET_SPECIFIC_FLAGS += -D__FPU_PRESENT=1
+    FLOAT=hard
+    GCC_TARGET_ARCH := cortex-m4
+
+  else ifeq ($(TARGET_ARCH), cortex-m55)
+    CORE=M55
+    ARM_LDFLAGS := -Wl,--cpu=8.1-M.Main.mve.fp
+    TARGET_SPECIFIC_FLAGS += -D__DSP_PRESENT=1 -D__FPU_PRESENT=1
+    FLOAT=hard
+
+  else ifeq ($(TARGET_ARCH), cortex-m55+nodsp+nofp)
+    CORE=M55
+    ARM_LDFLAGS := -Wl,--cpu=8.1-M.Main.mve.no_dsp.no_fp
+
+  else ifeq ($(TARGET_ARCH), cortex-m55+nofp)
+    CORE=M55
+    ARM_LDFLAGS := -Wl,--cpu=8.1-M.Main.mve.no_fp
+    TARGET_SPECIFIC_FLAGS += -D__DSP_PRESENT=1
+
+  else ifeq ($(TARGET_ARCH), cortex-m7)
+    CORE=M7
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M7.no_fp
+    GCC_TARGET_ARCH := cortex-m7+nofp
+
+  else ifeq ($(TARGET_ARCH), cortex-m7+fp)
+    CORE=M7
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M7
+    FLOAT=hard
+    GCC_TARGET_ARCH := cortex-m7
+
+  else
+    $(error "TARGET_ARCH=$(TARGET_ARCH) is not supported")
+  endif
+
+  ifneq ($(filter cortex-m55%,$(TARGET_ARCH)),)
+    # -mfloat-abi=soft disables MVE; use softfp instead for Cortex-M55.
+    ifeq ($(FLOAT),soft)
+      FLOAT=softfp
+    endif
+  endif
+
+  # Toolchain-specific flags
+  ifeq ($(TOOLCHAIN), armclang)
+    CXX_TOOL  := armclang
+    CC_TOOL   := armclang
+    AR_TOOL   := armar
+    LD        := armlink
+
+    FLAGS_ARMC = \
+      --target=arm-arm-none-eabi \
+      -mcpu=$(TARGET_ARCH)
+
+    # For debug, include specific dwarf format symbols
+    ifeq ($(BUILD_TYPE), debug)
+      ifneq ($(ARMCLANG_DEBUG_DWARF_LEVEL),)
+        FLAGS_ARMC += -gdwarf-$(ARMCLANG_DEBUG_DWARF_LEVEL)
+      endif
+    endif
+
+    CXXFLAGS += $(FLAGS_ARMC)
+    CCFLAGS += $(FLAGS_ARMC)
+    LDFLAGS += $(ARM_LDFLAGS)
+
+    # Arm Compiler will not link the Math library (see below), therefore we're filtering it out.
+    # See Fatal error: L6450U: Cannot find library m:
+    # "Arm Compiler is designed to run in a bare metal environment,
+    # and automatically includes implementations of these functions,
+    # and so no such flag is necessary."
+    # https://developer.arm.com/documentation/100891/0611/troubleshooting/general-troubleshooting-advice
+    MICROLITE_LIBS := $(filter-out -lm,$(MICROLITE_LIBS))
+
+  else ifeq ($(TOOLCHAIN), gcc)
+    export PATH := $(MAKEFILE_DIR)/downloads/gcc_embedded/bin/:$(PATH)
+    DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/arm_gcc_download.sh ${MAKEFILE_DIR}/downloads)
+    ifneq ($(DOWNLOAD_RESULT), SUCCESS)
+      $(error Something went wrong with the GCC download: $(DOWNLOAD_RESULT))
+    endif
+
+    TARGET_TOOLCHAIN_PREFIX := arm-none-eabi-
+
+    FLAGS_GCC = -mcpu=$(GCC_TARGET_ARCH) -mfpu=auto
+    CXXFLAGS += $(FLAGS_GCC)
+    CCFLAGS += $(FLAGS_GCC)
+
+  else
+    $(error "TOOLCHAIN=$(TOOLCHAIN) is not supported.")
+  endif
+
+  PLATFORM_FLAGS = \
+    -DTF_LITE_MCU_DEBUG_LOG \
+    -mthumb \
+    -mfloat-abi=$(FLOAT) \
+    -funsigned-char \
+    -mlittle-endian \
+    -Wno-type-limits \
+    -Wno-unused-private-field \
+    -fomit-frame-pointer \
+    -MD \
+    -DCPU_CORTEX_$(CORE)=1 \
+    $(TARGET_SPECIFIC_FLAGS)
+
+  # Common + C/C++ flags
+  CXXFLAGS += $(PLATFORM_FLAGS)
+  CCFLAGS += $(PLATFORM_FLAGS)
+
+endif
diff --git a/scripts/py/gen_audio.py b/scripts/py/gen_audio.py
new file mode 100644
index 0000000..53ed019
--- /dev/null
+++ b/scripts/py/gen_audio.py
@@ -0,0 +1,48 @@
+#!env/bin/python3
+
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""
+Utility script to convert an audio clip into the format expected by the evaluation platform.
+"""
+import soundfile as sf
+
+from argparse import ArgumentParser
+from os import path
+
+from gen_utils import AudioUtils
+
+parser = ArgumentParser()
+parser.add_argument("--audio_path", help="Audio file path", required=True)
+parser.add_argument("--output_dir", help="Output directory", required=True)
+parser.add_argument("--sampling_rate", type=int, help="target sampling rate.", default=16000)
+parser.add_argument("--mono", type=bool, help="convert signal to mono.", default=True)
+parser.add_argument("--offset", type=float, help="start reading after this time (in seconds).", default=0)
+parser.add_argument("--duration", type=float, help="only load up to this much audio (in seconds).", default=0)
+parser.add_argument("--res_type", type=AudioUtils.res_data_type, help=f"Resample type: {AudioUtils.res_type_list()}.", default='kaiser_best')
+parser.add_argument("--min_samples", type=int, help="Minimum sample number.", default=16000)
+parser.add_argument("-v", "--verbosity", action="store_true")
+args = parser.parse_args()
+
+def main(args):
+    audio_data, samplerate = AudioUtils.load_resample_audio_clip(args.audio_path,
+                                                args.sampling_rate,
+                                                args.mono,  args.offset,
+                                                args.duration, args.res_type,
+                                                args.min_samples)
+    sf.write(path.join(args.output_dir, path.basename(args.audio_path)), audio_data, samplerate)
+
+if __name__ == '__main__':
+    main(args)
diff --git a/scripts/py/gen_audio_cpp.py b/scripts/py/gen_audio_cpp.py
new file mode 100644
index 0000000..54fdb23
--- /dev/null
+++ b/scripts/py/gen_audio_cpp.py
@@ -0,0 +1,153 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""
+Utility script to convert a set of audio clips in a given location into
+corresponding cpp files and a single hpp file referencing the vectors
+from the cpp files.
+"""
+import datetime
+import glob
+import math
+import os
+
+import numpy as np
+from os import path
+from argparse import ArgumentParser
+from jinja2 import Environment, FileSystemLoader
+from gen_utils import AudioUtils
+
+parser = ArgumentParser()
+parser.add_argument("--audio_path", type=str, help="path to audio folder to convert.")
+parser.add_argument("--source_folder_path", type=str, help="path to source folder to be generated.")
+parser.add_argument("--header_folder_path", type=str, help="path to header folder to be generated.")
+parser.add_argument("--sampling_rate", type=int, help="target sampling rate.", default=16000)
+parser.add_argument("--mono", type=bool, help="convert signal to mono.", default=True)
+parser.add_argument("--offset", type=float, help="start reading after this time (in seconds).", default=0)
+parser.add_argument("--duration", type=float, help="only load up to this much audio (in seconds).", default=0)
+parser.add_argument("--res_type", type=AudioUtils.res_data_type, help=f"Resample type: {AudioUtils.res_type_list()}.",
+                    default='kaiser_best')
+parser.add_argument("--min_samples", type=int, help="Minimum sample number.", default=16000)
+parser.add_argument("--license_template", type=str, help="Header template file",
+                    default="header_template.txt")
+parser.add_argument("-v", "--verbosity", action="store_true")
+args = parser.parse_args()
+
+env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
+                  trim_blocks=True,
+                  lstrip_blocks=True)
+
+
+def write_hpp_file(header_filepath, cc_filepath, header_template_file, num_audios, audio_filenames, audio_array_namesizes):
+    print(f"++ Generating {header_filepath}")
+
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 year=datetime.datetime.now().year)
+    env.get_template('AudioClips.hpp.template').stream(common_template_header=hdr,
+                                                       clips_count=num_audios,
+                                                       varname_size=audio_array_namesizes
+                                                       ) \
+        .dump(str(header_filepath))
+
+    print(f"++ Generating {cc_filepath}")
+
+    env.get_template('AudioClips.cc.template').stream(common_template_header=hdr,
+                                                       clips_count=num_audios,
+                                                       var_names=(name for name, _ in audio_array_namesizes),
+                                                       clip_sizes=(size for _, size in audio_array_namesizes),
+                                                       clip_names=audio_filenames) \
+        .dump(str(cc_filepath))
+
+
+def write_individual_audio_cc_file(clip_dirpath, clip_filename,
+                                   cc_filename, header_template_file, array_name,
+                                   sampling_rate_value, mono_value, offset_value, 
+                                   duration_value, res_type_value, min_len):
+    print(f"++ Converting {clip_filename} to {path.basename(cc_filename)}")
+    audio_filepath = path.join(clip_dirpath, clip_filename)
+    clip_data, samplerate = AudioUtils.load_resample_audio_clip(audio_filepath,
+                                                                sampling_rate_value, mono_value,
+                                                                offset_value, duration_value,
+                                                                res_type_value, min_len)
+
+    # Change from [-1, 1] fp32 range to int16 range.
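+    # Scaling by 2^15 maps full-scale float samples onto the int16 range; clipping
+    # guards against overflow for samples at exactly +1.0 before the cast.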
+    clip_data = np.clip((clip_data * (1 << 15)), 
+                        np.iinfo(np.int16).min, 
+                        np.iinfo(np.int16).max).flatten().astype(np.int16)
+
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 file_name=clip_filename,
+                                 year=datetime.datetime.now().year)
+
+    hex_line_generator = (', '.join(map(hex, sub_arr))
+                          for sub_arr in np.array_split(clip_data, math.ceil(len(clip_data)/20)))
+
+    env.get_template('audio.cc.template').stream(common_template_header=hdr,
+                                                 size=len(clip_data),
+                                                 var_name=array_name,
+                                                 audio_data=hex_line_generator) \
+        .dump(str(cc_filename))
+
+    return len(clip_data)
+
+
+def main(args):
+    # Keep the count of the audio files converted
+    audioclip_idx = 0
+    audioclip_filenames = []
+    audioclip_array_names = []
+    header_filename = "InputFiles.hpp"
+    common_cc_filename = "InputFiles.cc"
+    header_filepath = path.join(args.header_folder_path, header_filename)
+    common_cc_filepath = path.join(args.source_folder_path, common_cc_filename)
+
+    if os.path.isdir(args.audio_path):  
+        filepaths = sorted(glob.glob(path.join(args.audio_path, '**/*.wav'), recursive=True))
+    elif os.path.isfile(args.audio_path):
+        filepaths = [args.audio_path]
+    else:
+        raise OSError("Directory or file does not exist.")
+
+    for filepath in filepaths:
+        filename = path.basename(filepath)
+        clip_dirpath = path.dirname(filepath)
+        try:
+            audioclip_filenames.append(filename)
+
+            # Save the cc file
+            cc_filename = path.join(args.source_folder_path,
+                                    (filename.rsplit(".")[0]).replace(" ", "_") + ".cc")
+            array_name = "audio" + str(audioclip_idx)
+            array_size = write_individual_audio_cc_file(clip_dirpath, filename, cc_filename, args.license_template, array_name,
+                                                        args.sampling_rate, args.mono, args.offset,
+                                                        args.duration, args.res_type, args.min_samples)
+
+            audioclip_array_names.append((array_name, array_size))
+            # Increment audio index
+            audioclip_idx = audioclip_idx + 1
+        except Exception:
+            if args.verbosity:
+                print(f"Failed to open {filename} as an audio.")
+
+    write_hpp_file(header_filepath, common_cc_filepath, args.license_template,
+                   audioclip_idx, audioclip_filenames, audioclip_array_names)
+
+
+if __name__ == '__main__':
+    main(args)
diff --git a/scripts/py/gen_default_input_cpp.py b/scripts/py/gen_default_input_cpp.py
new file mode 100644
index 0000000..c091fd1
--- /dev/null
+++ b/scripts/py/gen_default_input_cpp.py
@@ -0,0 +1,53 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""
+Utility script to generate the minimum InputFiles.hpp and cpp files required by an application.
+"""
+import datetime
+import os
+
+from argparse import ArgumentParser
+from jinja2 import Environment, FileSystemLoader
+
+parser = ArgumentParser()
+parser.add_argument("--header_folder_path", type=str, help="path to header folder to be generated.")
+parser.add_argument("--license_template", type=str, help="Header template file",
+                    default="header_template.txt")
+args = parser.parse_args()
+
+env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
+                  trim_blocks=True,
+                  lstrip_blocks=True)
+
+
+def write_hpp_file(header_file_path, header_template_file):
+    print(f"++ Generating {header_file_path}")
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 year=datetime.datetime.now().year)
+    env.get_template('default.hpp.template').stream(common_template_header=hdr) \
+        .dump(str(header_file_path))
+
+
+def main(args):
+    header_filename = "InputFiles.hpp"
+    header_filepath = os.path.join(args.header_folder_path, header_filename)
+    write_hpp_file(header_filepath, args.license_template)
+
+
+if __name__ == '__main__':
+    main(args)
diff --git a/scripts/py/gen_fpga_mem_map.py b/scripts/py/gen_fpga_mem_map.py
new file mode 100644
index 0000000..6a2d1d2
--- /dev/null
+++ b/scripts/py/gen_fpga_mem_map.py
@@ -0,0 +1,192 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import os
+from argparse import ArgumentParser
+
+"""
+This file is used as part of the post-build steps to generate the 'images.txt'
+file that can be copied onto the MPS3 board's SD card. The purpose is to avoid
+having to manually edit the file for the different load regions that the build
+scatter file might dictate.
+"""
+
+def is_commented(line):
+    return line.startswith(";")
+
+
+def is_load_rom(line):
+    load_region_specifiers = ['LOAD_ROM', 'LD_ROM', 'LOAD_REGION']
+
+    for load_specifier in load_region_specifiers:
+        if line.startswith(load_specifier):
+            return True
+
+    return False
+
+
+class TargetSubsystem:
+
+    def __init__(self, target_subsystem_name: str):
+        """
+        Constructor for target class.
+        Arguments:
+            target_subsystem_name: name of the target subsystem
+        """
+        # Dict with mem map and binary names we expect
+        self.subsystems = {
+            "sse-200": {
+                "mmap_mcc" : {
+                    # FPGA addr |  MCC addr  |
+                    "0x00000000": "0x00000000", # ITCM (NS)
+                    "0x10000000": "0x01000000", # ITCM (S)
+                    "0x20000000": "0x02000000", # DTCM (NS)
+                    "0x30000000": "0x03000000", # DTCM (S)
+                    "0x60000000": "0x08000000"  # DDR (NS)
+                },
+                "bin_names": {
+                    0: "itcm.bin",
+                    1: "dram.bin"
+                }
+            },
+            "sse-300": {
+                "mmap_mcc" : {
+                    # FPGA addr |  MCC addr  |
+                    "0x00000000": "0x00000000", # ITCM (NS)
+                    "0x01000000": "0x02000000", # BRAM or FPGA's data SRAM (NS)
+                    "0x60000000": "0x08000000", # DDR (NS)
+                    "0x70000000": "0x0c000000"  # DDR (S)
+                },
+                "bin_names": {
+                    0: "itcm.bin",
+                    1: "dram.bin"
+                }
+            }
+        }
+
+        self.name = target_subsystem_name
+
+
+    def is_supported(self, target_subsystem: str) -> bool:
+        """
+        Checks if the target subsystem exists within systems
+        supported by this script
+        """
+        if target_subsystem in self.subsystems.keys():
+            return True
+
+        print(f"Platforms supported: {self.subsystems.keys()}")
+        return False
+
+
+    def mps3_mappings(self) -> dict:
+        """
+        Returns the FPGA <--> MCC address translations
+        as a dict
+        """
+        if self.is_supported(self.name):
+            return self.subsystems[self.name]['mmap_mcc']
+        return {}
+
+
+    def mps3_bin_names(self) -> dict:
+        """
+        Returns expected binary names for the executable built
+        for Cortex-M55 or Cortex-M55+Ethos-U55 targets in the
+        form of a dict with index and name
+        """
+        if self.is_supported(self.name):
+            return self.subsystems[self.name]['bin_names']
+
+        return {}
+
+
+def main(args):
+    """
+    Generates the output txt file with the MCC to FPGA address mapping that is
+    used by the MCC on the FPGA to load the executable regions into the correct
+    locations in memory.
+    """
+    # List out arguments used:
+    scatter_file_path = args.scatter_file_path
+    target_subsystem_name = args.target_subsystem
+    output_file_path = args.output_file_path
+
+    target = TargetSubsystem(target_subsystem_name=target_subsystem_name)
+
+    if not target.is_supported(target_subsystem_name):
+        print(f'Target {target_subsystem_name} not supported.')
+        return
+
+    with open(scatter_file_path,'r') as scatter_file:
+        lines_read = scatter_file.readlines()
+        str_list = []
+
+        mem_map = target.mps3_mappings()
+        bin_names = target.mps3_bin_names()
+
+        str_list.append("TITLE: Arm MPS3 FPGA prototyping board Images Configuration File\n")
+        str_list.append("[IMAGES]\n\n")
+
+        cnt = 0
+        for line in lines_read:
+            if is_commented(line) or not is_load_rom(line):
+                continue
+
+            addr = line.split()[1]
+
+            if mem_map.get(addr) is None:
+                raise RuntimeError(
+                    f'Translation for address {addr} unavailable')
+            if cnt >= len(bin_names):
+                raise RuntimeError(
+                    f"bin names len exceeded: {cnt}")
+
+            str_list.append("IMAGE" + str(cnt) + "ADDRESS: " +
+                mem_map[addr] + " ; MCC@" + mem_map[addr] +
+                " <=> FPGA@"  + addr + "\n")
+            str_list.append("IMAGE" + str(cnt) + "UPDATE: AUTO\n")
+            str_list.append("IMAGE" + str(cnt) + "FILE: \SOFTWARE\\" +
+                bin_names[cnt] + "\n\n")
+            cnt += 1
+
+        if cnt > 0 and cnt < 33:
+            str_list.insert(2,
+                "TOTALIMAGES: {} ;Number of Images (Max: 32)\n\n".format(
+                    cnt))
+        else:
+            raise RuntimeError('Invalid image count')
+
+        with open(output_file_path, "w") as output_file:
+            output_file.write(''.join(str_list))
+
+
+if __name__ == "__main__":
+    parser = ArgumentParser()
+    parser.add_argument("--scatter_file_path", type=str, required=True,
+                        help="Path to the scatter file")
+    parser.add_argument("--target_subsystem", type=str, required=True,
+                        help="Target subsystem in use")
+    parser.add_argument("--output_file_path", type=str, required=True,
+                        help="Output file path")
+    args = parser.parse_args()
+    main(args)
diff --git a/scripts/py/gen_labels_cpp.py b/scripts/py/gen_labels_cpp.py
new file mode 100644
index 0000000..1be9c63
--- /dev/null
+++ b/scripts/py/gen_labels_cpp.py
@@ -0,0 +1,81 @@
+#!env/bin/python3
+
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""
+Utility script to convert a given text file with labels (annotations for an
+NN model output vector) into a vector list initialiser. The intention is for
+this script to be called as part of the build framework to auto-generate the
+cpp file with labels that can be used in the application without modification.
+"""
+import datetime
+import os
+from argparse import ArgumentParser
+from jinja2 import Environment, FileSystemLoader
+
+parser = ArgumentParser()
+
+# Label file path
+parser.add_argument("--labels_file", type=str, help="Path to the label text file", required=True)
+# Output file to be generated
+parser.add_argument("--source_folder_path", type=str, help="path to source folder to be generated.", required=True)
+parser.add_argument("--header_folder_path", type=str, help="path to header folder to be generated.", required=True)
+parser.add_argument("--output_file_name", type=str, help="Required output file name", required=True)
+# Namespaces
+parser.add_argument("--namespaces", action='append', default=[])
+# License template
+parser.add_argument("--license_template", type=str, help="Header template file",
+                    default="header_template.txt")
+
+args = parser.parse_args()
+
+env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
+                  trim_blocks=True,
+                  lstrip_blocks=True)
+
+
+def main(args):
+    # Get the labels from text file
+    with open(args.labels_file, "r") as f:
+        labels = f.read().splitlines()
+
+    # No labels?
+    if len(labels) == 0:
+        raise Exception(f"no labels found in {args.label_file}")
+
+    header_template = env.get_template(args.license_template)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 file_name=os.path.basename(args.labels_file),
+                                 year=datetime.datetime.now().year)
+
+    hpp_filename = os.path.join(args.header_folder_path, args.output_file_name + ".hpp")
+    env.get_template('Labels.hpp.template').stream(common_template_header=hdr,
+                                                   filename=(args.output_file_name).upper(),
+                                                   namespaces=args.namespaces) \
+        .dump(str(hpp_filename))
+
+
+    cc_filename = os.path.join(args.source_folder_path, args.output_file_name + ".cc")
+    env.get_template('Labels.cc.template').stream(common_template_header=hdr,
+                                                  labels=labels,
+                                                  labelsSize=len(labels),
+                                                  namespaces=args.namespaces) \
+        .dump(str(cc_filename))
+
+
+if __name__ == '__main__':
+    main(args)
diff --git a/scripts/py/gen_model_cpp.py b/scripts/py/gen_model_cpp.py
new file mode 100644
index 0000000..4843668
--- /dev/null
+++ b/scripts/py/gen_model_cpp.py
@@ -0,0 +1,97 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""
+Utility script to generate a model cc file that can be included in the
+project directly. This should be called as part of the CMake framework
+if the models need to be generated at the configuration stage.
+"""
+import datetime
+import os
+from argparse import ArgumentParser
+from pathlib import Path
+from jinja2 import Environment, FileSystemLoader
+
+parser = ArgumentParser()
+
+parser.add_argument("--tflite_path", help="Model (.tflite) path", required=True)
+parser.add_argument("--output_dir", help="Output directory", required=True)
+parser.add_argument('-e', '--expression', action='append', default=[], dest="expr")
+parser.add_argument('--header', action='append', default=[], dest="headers")
+parser.add_argument('-ns', '--namespaces', action='append', default=[], dest="namespaces")
+parser.add_argument("--license_template", type=str, help="Header template file",
+                    default="header_template.txt")
+args = parser.parse_args()
+
+env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
+                  trim_blocks=True,
+                  lstrip_blocks=True)
+
+
+def write_tflite_data(tflite_path):
+    # Extract array elements
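+    # Yields the model bytes as comma-separated hex literals, 20 values per line,
+    # wrapped in braces so the output can be dropped straight into a C array initialiser.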
+
+    model_bytes = model_hex_bytes(tflite_path)
+    line = '{\n'
+    i = 1
+    while True:
+        try:
+            el = next(model_bytes)
+            line = line + el + ', '
+            if i % 20 == 0:
+                yield line
+                line = ''
+            i += 1
+        except StopIteration:
+            line = line[:-2] + '};\n'
+            yield line
+            break
+
+
+def model_hex_bytes(tflite_path):
+    with open(tflite_path, 'rb') as tflite_model:
+        byte = tflite_model.read(1)
+        while byte != b"":
+            yield f'0x{byte.hex()}'
+            byte = tflite_model.read(1)
+
+
+def main(args):
+    if not os.path.isfile(args.tflite_path):
+        raise Exception(f"{args.tflite_path} not found")
+
+    # Cpp filename:
+    cpp_filename = Path(os.path.join(args.output_dir, os.path.basename(args.tflite_path) + ".cc")).absolute()
+    print(f"++ Converting {os.path.basename(args.tflite_path)} to\
+    {os.path.basename(cpp_filename)}")
+
+    os.makedirs(cpp_filename.parent, exist_ok=True)
+
+    header_template = env.get_template(args.license_template)
+
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 file_name=os.path.basename(args.tflite_path),
+                                 gen_time=datetime.datetime.now(),
+                                 year=datetime.datetime.now().year)
+
+    env.get_template('tflite.cc.template').stream(common_template_header=hdr,
+                                                  model_data=write_tflite_data(args.tflite_path),
+                                                  expressions=args.expr,
+                                                  additional_headers=args.headers,
+                                                  namespaces=args.namespaces).dump(str(cpp_filename))
+
+
+if __name__ == '__main__':
+    main(args)
diff --git a/scripts/py/gen_rgb_cpp.py b/scripts/py/gen_rgb_cpp.py
new file mode 100644
index 0000000..1a2e09b
--- /dev/null
+++ b/scripts/py/gen_rgb_cpp.py
@@ -0,0 +1,135 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""
+Utility script to convert a set of RGB images in a given location into
+corresponding cpp files and a single hpp file referencing the vectors
+from the cpp files.
+"""
+import datetime
+import glob
+import math
+import os
+import numpy as np
+
+from argparse import ArgumentParser
+from PIL import Image, UnidentifiedImageError
+from jinja2 import Environment, FileSystemLoader
+
+parser = ArgumentParser()
+parser.add_argument("--image_path", type=str, help="path to images folder or image file  to convert.")
+parser.add_argument("--source_folder_path", type=str, help="path to source folder to be generated.")
+parser.add_argument("--header_folder_path", type=str, help="path to header folder to be generated.")
+parser.add_argument("--image_size", type=int, nargs=2, help="Size (width and height) of the converted images.")
+parser.add_argument("--license_template", type=str, help="Header template file",
+                    default="header_template.txt")
+args = parser.parse_args()
+
+env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
+                  trim_blocks=True,
+                  lstrip_blocks=True)
+
+
+def write_hpp_file(header_file_path, cc_file_path, header_template_file, num_images, image_filenames,
+                   image_array_names, image_size):
+    print(f"++ Generating {header_file_path}")
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 year=datetime.datetime.now().year)
+    env.get_template('Images.hpp.template').stream(common_template_header=hdr,
+                                                   imgs_count=num_images,
+                                                   img_size=str(image_size[0] * image_size[1] * 3),
+                                                   var_names=image_array_names) \
+        .dump(str(header_file_path))
+
+    env.get_template('Images.cc.template').stream(common_template_header=hdr,
+                                                  var_names=image_array_names,
+                                                  img_names=image_filenames) \
+        .dump(str(cc_file_path))
+
+
+def write_individual_img_cc_file(image_filename, cc_filename, header_template_file, original_image,
+                                 image_size, array_name):
+    print(f"++ Converting {image_filename} to {os.path.basename(cc_filename)}")
+
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 file_name=os.path.basename(image_filename),
+                                 year=datetime.datetime.now().year)
+
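+    # Image.thumbnail() preserves the aspect ratio, so the resized image is pasted
+    # centred onto a white canvas of the requested size; the remaining border acts as padding.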
+    original_image.thumbnail(image_size)
+    delta_w = abs(image_size[0] - original_image.size[0])
+    delta_h = abs(image_size[1] - original_image.size[1])
+    resized_image = Image.new('RGB', image_size, (255, 255, 255, 0))
+    resized_image.paste(original_image, (int(delta_w / 2), int(delta_h / 2)))
+
+    # Convert the image and write it to the cc file
+    rgb_data = np.array(resized_image, dtype=np.uint8).flatten()
+    hex_line_generator = (', '.join(map(hex, sub_arr))
+                          for sub_arr in np.array_split(rgb_data, math.ceil(len(rgb_data) / 20)))
+    env.get_template('image.cc.template').stream(common_template_header=hdr,
+                                                 var_name=array_name,
+                                                 img_data=hex_line_generator) \
+        .dump(str(cc_filename))
+
+
+def main(args):
+    # Keep the count of the images converted
+    image_idx = 0
+    image_filenames = []
+    image_array_names = []
+
+
+    if os.path.isdir(args.image_path):
+        filepaths = sorted(glob.glob(os.path.join(args.image_path, '**/*.*'), recursive=True))
+    elif os.path.isfile(args.image_path):
+        filepaths = [args.image_path]
+    else:
+        raise OSError("Directory or file does not exist.")
+
+    for filepath in filepaths:
+        filename = os.path.basename(filepath)
+
+        try:
+            original_image = Image.open(filepath).convert("RGB")
+        except UnidentifiedImageError:
+            print(f"-- Skipping file {filepath} due to unsupported image format.")
+            continue
+
+        image_filenames.append(filename)
+
+        # Save the cc file
+        cc_filename = os.path.join(args.source_folder_path,
+                                   (filename.rsplit(".")[0]).replace(" ", "_") + ".cc")
+        array_name = "im" + str(image_idx)
+        image_array_names.append(array_name)
+        write_individual_img_cc_file(filename, cc_filename, args.license_template,
+                                     original_image, args.image_size, array_name)
+
+        # Increment image index
+        image_idx = image_idx + 1
+
+    header_filename = "InputFiles.hpp"
+    header_filepath = os.path.join(args.header_folder_path, header_filename)
+    common_cc_filename = "InputFiles.cc"
+    common_cc_filepath = os.path.join(args.source_folder_path, common_cc_filename)
+    write_hpp_file(header_filepath, common_cc_filepath, args.license_template,
+                   image_idx, image_filenames, image_array_names, args.image_size)
+
+
+if __name__ == '__main__':
+    main(args)
diff --git a/scripts/py/gen_test_data_cpp.py b/scripts/py/gen_test_data_cpp.py
new file mode 100644
index 0000000..7cc5f11
--- /dev/null
+++ b/scripts/py/gen_test_data_cpp.py
@@ -0,0 +1,162 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""
+Utility script to convert a set of pairs of npy files in a given location into
+corresponding cpp files and a single hpp file referencing the vectors
+from the cpp files.
+"""
+import datetime
+import math
+import os
+import numpy as np
+
+from argparse import ArgumentParser
+from jinja2 import Environment, FileSystemLoader
+
+parser = ArgumentParser()
+parser.add_argument("--data_folder_path", type=str, help="path to ifm-ofm npy folder to convert.")
+parser.add_argument("--source_folder_path", type=str, help="path to source folder to be generated.")
+parser.add_argument("--header_folder_path", type=str, help="path to header folder to be generated.")
+parser.add_argument("--usecase", type=str, default="", help="Test data file suffix.")
+parser.add_argument("--namespaces", action='append', default=[])
+parser.add_argument("--license_template", type=str, help="Header template file",
+                    default="header_template.txt")
+parser.add_argument("-v", "--verbosity", action="store_true")
+
+args = parser.parse_args()
+
+env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
+                  trim_blocks=True,
+                  lstrip_blocks=True)
+
+
+def write_hpp_file(header_filename, cc_file_path, header_template_file, num_iofms,
+                   ifm_array_names, ifm_size, ofm_array_names, ofm_size, iofm_data_type):
+    header_file_path = os.path.join(args.header_folder_path, header_filename)
+
+    print(f"++ Generating {header_file_path}")
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 year=datetime.datetime.now().year)
+    env.get_template('TestData.hpp.template').stream(common_template_header=hdr,
+                                                   fm_count=num_iofms,
+                                                   ifm_var_names=ifm_array_names,
+                                                   ifm_var_size=ifm_size,
+                                                   ofm_var_names=ofm_array_names,
+                                                   ofm_var_size=ofm_size,
+                                                   data_type=iofm_data_type,
+                                                   namespaces=args.namespaces) \
+        .dump(str(header_file_path))
+
+    env.get_template('TestData.cc.template').stream(common_template_header=hdr,
+                                                  include_h=header_filename,
+                                                  ifm_var_names=ifm_array_names,
+                                                  ofm_var_names=ofm_array_names,
+                                                  data_type=iofm_data_type,
+                                                  namespaces=args.namespaces) \
+        .dump(str(cc_file_path))
+
+
+def write_individual_cc_file(filename, cc_filename, header_filename, header_template_file, array_name, iofm_data_type):
+    print(f"++ Converting {filename} to {os.path.basename(cc_filename)}")
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 file_name=os.path.basename(filename),
+                                 year=datetime.datetime.now().year)
+
+    # Convert the feature map data and write it to the cc file
+    fm_data = np.load(os.path.join(args.data_folder_path, filename)).flatten()
+    hex_line_generator = (', '.join(map(hex, sub_arr))
+                          for sub_arr in np.array_split(fm_data, math.ceil(len(fm_data) / 20)))
+
+    env.get_template('testdata.cc.template').stream(common_template_header=hdr,
+                                                 include_h=header_filename,
+                                                 var_name=array_name,
+                                                 fm_data=hex_line_generator,
+                                                 data_type=iofm_data_type,
+                                                 namespaces=args.namespaces) \
+        .dump(str(cc_filename))
+
+
+def get_npy_vec_size(filename: str) -> int:
+    """
+    Gets the size of the array in the npy file
+    Args:
+        filename: npy file path.
+    Return:
+        size in bytes
+    """
+    data = np.load(os.path.join(args.data_folder_path, filename))
+    return (data.size * data.dtype.itemsize)
+
+
+def main(args):
+    # Keep track of the ifm/ofm arrays converted
+    ifm_array_names = []
+    ofm_array_names = []
+
+    add_usecase_fname = ("_" + args.usecase) if (args.usecase != "") else ""
+    header_filename = "TestData" + add_usecase_fname + ".hpp"
+    common_cc_filename = "TestData" + add_usecase_fname + ".cc"
+
+    # In the data_folder_path there should be pairs of ifm-ofm
+    # The assumed ifm-ofm naming convention is: ifm0.npy-ofm0.npy, ifm1.npy-ofm1.npy
+    i_ofms_count = int(len([name for name in os.listdir(os.path.join(args.data_folder_path)) if name.lower().endswith('.npy')]) / 2)
+
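+    # Infer the C data type for the generated arrays from the dtype of the first
+    # ifm file; anything other than int8 is emitted as uint8_t.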
+    iofm_data_type = "int8_t"
+    if (i_ofms_count > 0):
+        iofm_data_type = "int8_t" if (np.load(os.path.join(args.data_folder_path, "ifm0.npy")).dtype == np.int8) else "uint8_t"
+
+    ifm_size = -1
+    ofm_size = -1
+
+    for idx in range(i_ofms_count):
+        # Save the fm cc file
+        base_name = "ifm" + str(idx)
+        filename = base_name+".npy"
+        array_name = base_name + add_usecase_fname
+        cc_filename = os.path.join(args.source_folder_path, array_name + ".cc")
+        ifm_array_names.append(array_name)
+        write_individual_cc_file(filename, cc_filename, header_filename, args.license_template, array_name, iofm_data_type)
+        if ifm_size == -1:
+            ifm_size = get_npy_vec_size(filename)
+        elif ifm_size != get_npy_vec_size(filename):
+            raise Exeception(f"ifm size changed for index {idx}")
+
+        # Save the fm cc file
+        base_name = "ofm" + str(idx)
+        filename = base_name+".npy"
+        array_name = base_name + add_usecase_fname
+        cc_filename = os.path.join(args.source_folder_path, array_name + ".cc")
+        ofm_array_names.append(array_name)
+        write_individual_cc_file(filename, cc_filename, header_filename, args.license_template, array_name, iofm_data_type)
+        if ofm_size == -1:
+            ofm_size = get_npy_vec_size(filename)
+        elif ofm_size != get_npy_vec_size(filename):
+            raise Exeception(f"ofm size changed for index {idx}")
+
+    common_cc_filepath = os.path.join(args.source_folder_path, common_cc_filename)
+    write_hpp_file(header_filename, common_cc_filepath, args.license_template,
+                   i_ofms_count, ifm_array_names, ifm_size, ofm_array_names, ofm_size, iofm_data_type)
+
+
+if __name__ == '__main__':
+    if args.verbosity:
+        print("Running gen_test_data_cpp with args: "+str(args))
+    main(args)
diff --git a/scripts/py/gen_utils.py b/scripts/py/gen_utils.py
new file mode 100644
index 0000000..4a56646
--- /dev/null
+++ b/scripts/py/gen_utils.py
@@ -0,0 +1,115 @@
+#!env/bin/python3
+
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import soundfile as sf
+import resampy
+import numpy as np
+
+
+class AudioUtils:
+    @staticmethod
+    def res_data_type(res_type_value):
+        """
+        Returns the input string if it is one of the valid resample types
+        """
+        import argparse
+        if res_type_value not in AudioUtils.res_type_list():
+            raise argparse.ArgumentTypeError(f"{res_type_value} not valid. Supported only {AudioUtils.res_type_list()}")
+        return res_type_value
+
+    @staticmethod
+    def res_type_list():
+        """
+        Returns the resample type list
+        """
+        return ['kaiser_best', 'kaiser_fast']
+
+    @staticmethod
+    def load_resample_audio_clip(path, target_sr=16000, mono=True, offset=0.0, duration=0, res_type='kaiser_best',
+                                 min_len=16000):
+        """
+        Load and resample an audio clip with the given desired specs.
+
+        Parameters:
+        ----------
+        path (string):             Path to the input audio clip.
+        target_sr (int, optional): Target sampling rate. Positive numbers are considered valid;
+                                    if zero or negative, the native sampling rate of the file is preserved. Default is 16000.
+        mono (bool, optional):     Specify if the audio file needs to be converted to mono. Default is True.
+        offset (float, optional):  Start reading after this time (in seconds). Default is 0.0.
+        duration (float, optional): Target duration (in seconds). Positive numbers are considered valid;
+                                    if zero or negative, the duration of the file is preserved. Default is 0.
+        res_type (str, optional):  Resample type to use. Default is 'kaiser_best'.
+        min_len (int, optional):   Minimum length of the output audio time series. Default is 16000.
+
+        Returns:
+        ----------
+        y (np.ndarray): Output audio time series of shape (n,) or (2, n).
+        sr (int):       A scalar number > 0 that represents the sampling rate of `y`.
+        """
+        try:
+            with sf.SoundFile(path) as audio_file:
+                origin_sr = audio_file.samplerate
+
+                if offset:
+                    # Seek to the start of the target read
+                    audio_file.seek(int(offset * origin_sr))
+
+                if duration > 0:
+                    num_frame_duration = int(duration * origin_sr)
+                else:
+                    num_frame_duration = -1
+
+                # Load the target number of frames
+                y = audio_file.read(frames=num_frame_duration, dtype=np.float32, always_2d=False).T
+
+        except Exception:
+            print(f"Failed to open {path} as an audio.")
+            raise
+
+        # Convert to mono if requested and if audio has more than one dimension
+        if mono and (y.ndim > 1):
+            y = np.mean(y, axis=0)
+
+        if (origin_sr != target_sr) and (target_sr > 0):
+            ratio = float(target_sr) / origin_sr
+            axis = -1
+            n_samples = int(np.ceil(y.shape[axis] * ratio))
+
+            # Resample using resampy
+            y_rs = resampy.resample(y, origin_sr, target_sr, filter=res_type, axis=axis)
+            n_rs_samples = y_rs.shape[axis]
+
+            # Adjust the size
+            if n_rs_samples > n_samples:
+                slices = [slice(None)] * y_rs.ndim
+                slices[axis] = slice(0, n_samples)
+                y = y_rs[tuple(slices)]
+            elif n_rs_samples < n_samples:
+                lengths = [(0, 0)] * y_rs.ndim
+                lengths[axis] = (0, n_samples - n_rs_samples)
+                y = np.pad(y_rs, lengths, 'constant', constant_values=(0))
+
+            sr = target_sr
+        else:
+            sr = origin_sr
+
+        # Pad if necessary and a minimum length is set (min_len > 0)
+        if (y.shape[0] < min_len) and (min_len > 0):
+            sample_to_pad = min_len - y.shape[0]
+            y = np.pad(y, (0, sample_to_pad), 'constant', constant_values=(0))
+
+        return y, sr
diff --git a/scripts/py/requirements.txt b/scripts/py/requirements.txt
new file mode 100644
index 0000000..6330f58
--- /dev/null
+++ b/scripts/py/requirements.txt
@@ -0,0 +1,12 @@
+cffi==1.14.2
+Jinja2==2.11.2
+llvmlite==0.33.0
+MarkupSafe==1.1.1
+numba==0.50.1
+numpy==1.17.4
+Pillow==7.0.0
+pycparser==2.20
+resampy==0.2.2
+scipy==1.5.2
+six==1.15.0
+SoundFile==0.10.3.post1
diff --git a/scripts/py/templates/AudioClips.cc.template b/scripts/py/templates/AudioClips.cc.template
new file mode 100644
index 0000000..edf46bc
--- /dev/null
+++ b/scripts/py/templates/AudioClips.cc.template
@@ -0,0 +1,62 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "InputFiles.hpp"
+
+static const char *audio_clip_filenames[] = {
+{% for name in clip_names %}
+    "{{name}}",
+{% endfor %}
+};
+
+static const int16_t *audio_clip_arrays[] = {
+    {{ var_names|join(',\n\t') }}
+};
+
+
+static const size_t audio_clip_sizes[NUMBER_OF_FILES] = {
+    {{ clip_sizes|join(',\n\t') }}
+};
+
+
+const char* get_filename(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return audio_clip_filenames[idx];
+    }
+    return nullptr;
+}
+
+
+const int16_t* get_audio_array(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return audio_clip_arrays[idx];
+    }
+    return nullptr;
+}
+
+
+uint32_t get_audio_array_size(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return audio_clip_sizes[idx];
+    }
+    return 0;
+}
+
diff --git a/scripts/py/templates/AudioClips.hpp.template b/scripts/py/templates/AudioClips.hpp.template
new file mode 100644
index 0000000..eb0beda
--- /dev/null
+++ b/scripts/py/templates/AudioClips.hpp.template
@@ -0,0 +1,34 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#ifndef GENERATED_AUDIOCLIPS_H
+#define GENERATED_AUDIOCLIPS_H
+
+#include <cstdint>
+#include <stddef.h>
+
+#define NUMBER_OF_FILES  ({{clips_count}}U)
+{% for var_name, size in varname_size %}
+extern const int16_t {{var_name}}[{{size}}];
+{% endfor %}
+
+const char* get_filename(const uint32_t idx);
+const int16_t* get_audio_array(const uint32_t idx);
+uint32_t get_audio_array_size(const uint32_t idx);
+
+#endif /* GENERATED_AUDIOCLIPS_H */
diff --git a/scripts/py/templates/Images.cc.template b/scripts/py/templates/Images.cc.template
new file mode 100644
index 0000000..6e86f98
--- /dev/null
+++ b/scripts/py/templates/Images.cc.template
@@ -0,0 +1,47 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "InputFiles.hpp"
+
+static const char *img_filenames[] = {
+{% for name in img_names %}
+    "{{name}}",
+{% endfor %}
+};
+
+static const uint8_t *img_arrays[] = {
+    {{ var_names|join(',\n\t') }}
+};
+
+const char* get_filename(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return img_filenames[idx];
+    }
+    return nullptr;
+}
+
+
+const uint8_t* get_img_array(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return img_arrays[idx];
+    }
+    return nullptr;
+}
+
diff --git a/scripts/py/templates/Images.hpp.template b/scripts/py/templates/Images.hpp.template
new file mode 100644
index 0000000..89ce39e
--- /dev/null
+++ b/scripts/py/templates/Images.hpp.template
@@ -0,0 +1,34 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#ifndef GENERATED_IMAGES_H
+#define GENERATED_IMAGES_H
+
+#include <cstdint>
+
+#define NUMBER_OF_FILES  ({{imgs_count}}U)
+#define IMAGE_DATA_SIZE  ({{img_size}}U)
+
+{% for var_name in var_names %}
+extern const uint8_t {{var_name}}[IMAGE_DATA_SIZE];
+{% endfor %}
+
+const char* get_filename(const uint32_t idx);
+const uint8_t* get_img_array(const uint32_t idx);
+
+#endif /* GENERATED_IMAGES_H */
diff --git a/scripts/py/templates/Labels.cc.template b/scripts/py/templates/Labels.cc.template
new file mode 100644
index 0000000..f1ec1b5
--- /dev/null
+++ b/scripts/py/templates/Labels.cc.template
@@ -0,0 +1,54 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "BufAttributes.hpp"
+
+#include <vector>
+#include <string>
+
+{% for namespace in namespaces %}
+namespace {{namespace}} {
+{% endfor %}
+
+static const char * labelsVec[] LABELS_ATTRIBUTE = {
+{% for label in labels %}
+    "{{label}}",
+{% endfor %}
+};
+
+bool GetLabelsVector(std::vector<std::string>& labels)
+{
+    constexpr size_t labelsSz = {{labelsSize}};
+    labels.clear();
+
+    if (!labelsSz) {
+        return false;
+    }
+
+    labels.reserve(labelsSz);
+
+    for (size_t i = 0; i < labelsSz; ++i) {
+        labels.emplace_back(labelsVec[i]);
+    }
+
+    return true;
+}
+
+{% for namespace in namespaces %}
+} /* namespace {{namespace}} */
+{% endfor %}
diff --git a/scripts/py/templates/Labels.hpp.template b/scripts/py/templates/Labels.hpp.template
new file mode 100644
index 0000000..c16a983
--- /dev/null
+++ b/scripts/py/templates/Labels.hpp.template
@@ -0,0 +1,41 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#ifndef {{filename}}_HPP
+#define {{filename}}_HPP
+
+#include <string>
+#include <vector>
+
+{% for namespace in namespaces %}
+namespace {{namespace}} {
+{% endfor %}
+
+/**
+ * @brief       Gets the label vector corresponding to the model
+ * @param[out]  labels   Vector of strings.
+ * @return      true if successful, false otherwise.
+ */
+extern bool GetLabelsVector(std::vector<std::string>& labels);
+
+
+{% for namespace in namespaces %}
+} /* namespace {{namespace}} */
+{% endfor %}
+
+#endif /* {{filename}}_HPP */
diff --git a/scripts/py/templates/TestData.cc.template b/scripts/py/templates/TestData.cc.template
new file mode 100644
index 0000000..1acd14d
--- /dev/null
+++ b/scripts/py/templates/TestData.cc.template
@@ -0,0 +1,51 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "{{include_h}}"
+
+{% for namespace in namespaces %}
+namespace {{namespace}} {
+{% endfor %}
+
+static const {{data_type}} *ifm_arrays[] = {
+    {{ ifm_var_names|join(',\n\t') }}
+};
+
+static const {{data_type}} *ofm_arrays[] = {
+    {{ ofm_var_names|join(',\n\t') }}
+};
+
+const {{data_type}}* get_ifm_data_array(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FM_FILES) {
+        return ifm_arrays[idx];
+    }
+    return nullptr;
+}
+
+const {{data_type}}* get_ofm_data_array(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FM_FILES) {
+        return ofm_arrays[idx];
+    }
+    return nullptr;
+}
+
+{% for namespace in namespaces %}
+} /* namespace {{namespace}} */
+{% endfor %}
diff --git a/scripts/py/templates/TestData.hpp.template b/scripts/py/templates/TestData.hpp.template
new file mode 100644
index 0000000..cdedd48
--- /dev/null
+++ b/scripts/py/templates/TestData.hpp.template
@@ -0,0 +1,47 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#ifndef GENERATED_TEST_DATA_H
+#define GENERATED_TEST_DATA_H
+
+#include <cstdint>
+
+{% for namespace in namespaces %}
+namespace {{namespace}} {
+{% endfor %}
+
+#define NUMBER_OF_FM_FILES  ({{fm_count}}U)
+#define IFM_DATA_SIZE  ({{ifm_var_size}}U)
+#define OFM_DATA_SIZE  ({{ofm_var_size}}U)
+
+{% for ifm_var_name in ifm_var_names %}
+extern const {{data_type}} {{ifm_var_name}}[IFM_DATA_SIZE];
+{% endfor %}
+
+{% for ofm_var_name in ofm_var_names %}
+extern const {{data_type}} {{ofm_var_name}}[OFM_DATA_SIZE];
+{% endfor %}
+
+const {{data_type}}* get_ifm_data_array(const uint32_t idx);
+const {{data_type}}* get_ofm_data_array(const uint32_t idx);
+
+{% for namespace in namespaces %}
+} /* namespace {{namespace}} */
+{% endfor %}
+
+#endif /* GENERATED_TEST_DATA_H */
diff --git a/scripts/py/templates/audio.cc.template b/scripts/py/templates/audio.cc.template
new file mode 100644
index 0000000..f1e29ef
--- /dev/null
+++ b/scripts/py/templates/audio.cc.template
@@ -0,0 +1,25 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "InputFiles.hpp"
+#include "BufAttributes.hpp"
+#include <cstdint>
+
+const int16_t {{var_name}} [{{size}}] IFM_BUF_ATTRIBUTE = {
+    {{audio_data|join(',\n\t')}}
+};
\ No newline at end of file
diff --git a/scripts/py/templates/default.hpp.template b/scripts/py/templates/default.hpp.template
new file mode 100644
index 0000000..acba891
--- /dev/null
+++ b/scripts/py/templates/default.hpp.template
@@ -0,0 +1,28 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#ifndef DEFAULT_GENERATED_INPUT_H
+#define DEFAULT_GENERATED_INPUT_H
+
+#include <cstdint>
+
+#define NUMBER_OF_FILES  (0U)
+
+const char* get_filename(const uint32_t idx);
+
+#endif /* DEFAULT_GENERATED_INPUT_H */
diff --git a/scripts/py/templates/header_template.txt b/scripts/py/templates/header_template.txt
new file mode 100644
index 0000000..0dac4be
--- /dev/null
+++ b/scripts/py/templates/header_template.txt
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) {{year}}, Arm Limited and affiliates.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*********************    Autogenerated file. DO NOT EDIT *******************
+ * Generated from {{script_name}} tool {% if file_name %}and {{file_name}}{% endif %} file.
+ * Date: {{gen_time}}
+ ***************************************************************************/
diff --git a/scripts/py/templates/image.cc.template b/scripts/py/templates/image.cc.template
new file mode 100644
index 0000000..010daa1
--- /dev/null
+++ b/scripts/py/templates/image.cc.template
@@ -0,0 +1,25 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "InputFiles.hpp"
+#include "BufAttributes.hpp"
+#include <cstdint>
+
+const uint8_t {{var_name}}[] IFM_BUF_ATTRIBUTE = {
+    {{img_data|join(',\n\t')}}
+};
diff --git a/scripts/py/templates/testdata.cc.template b/scripts/py/templates/testdata.cc.template
new file mode 100644
index 0000000..e3c1dc6
--- /dev/null
+++ b/scripts/py/templates/testdata.cc.template
@@ -0,0 +1,33 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "{{include_h}}"
+#include "BufAttributes.hpp"
+#include <cstdint>
+
+{% for namespace in namespaces %}
+namespace {{namespace}} {
+{% endfor %}
+
+const {{data_type}} {{var_name}} [{{size}}] IFM_BUF_ATTRIBUTE = {
+    {{fm_data|join(',\n\t')}}
+};
+
+{% for namespace in namespaces %}
+} /* namespace {{namespace}} */
+{% endfor %}
diff --git a/scripts/py/templates/tflite.cc.template b/scripts/py/templates/tflite.cc.template
new file mode 100644
index 0000000..97bdec5
--- /dev/null
+++ b/scripts/py/templates/tflite.cc.template
@@ -0,0 +1,49 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "Model.hpp"
+{% for header in additional_headers %}
+#include "{{header}}"
+{% endfor %}
+
+{% for namespace in namespaces %}
+namespace {{namespace}} {
+{% endfor %}
+
+{% for expression in expressions %}
+{{expression}};
+{% endfor %}
+
+static const uint8_t nn_model[] MODEL_TFLITE_ATTRIBUTE =
+{% for model_hex_line in model_data %}
+{{model_hex_line}}
+{% endfor %}
+
+const uint8_t * GetModelPointer()
+{
+    return nn_model;
+}
+
+size_t GetModelLen()
+{
+    return sizeof(nn_model);
+}
+
+{% for namespace in namespaces %}
+} /* namespace {{namespace}} */
+{% endfor %}
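A minimal consumer sketch for the accessors generated above is shown here. The namespace arm::app::model is only an assumption; the real namespaces (and the declarations normally pulled in via Model.hpp) are whatever the generator was invoked with.

    #include <cstdint>
    #include <cstddef>
    #include <cstdio>

    namespace arm { namespace app { namespace model {
        const uint8_t* GetModelPointer();   /* assumed generated namespace */
        size_t GetModelLen();
    }}} /* namespace arm::app::model */

    void print_model_info()
    {
        /* The generated translation unit owns the model data; callers only see
         * a pointer and a length that can be handed to the inference runtime. */
        const uint8_t* nn = arm::app::model::GetModelPointer();
        const size_t len  = arm::app::model::GetModelLen();
        printf("tflite model at %p, %zu bytes\n", static_cast<const void*>(nn), len);
    }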
diff --git a/scripts/vela/vela.ini b/scripts/vela/vela.ini
new file mode 100644
index 0000000..fcd18be
--- /dev/null
+++ b/scripts/vela/vela.ini
@@ -0,0 +1,80 @@
+;
+; Copyright (c) 2021 Arm Limited. All rights reserved.
+; SPDX-License-Identifier: Apache-2.0
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;     http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+;
+
+; -----------------------------------------------------------------------------
+; Vela configuration file
+
+; -----------------------------------------------------------------------------
+; System Configuration
+
+; Ethos-U55 Deep Embedded: SRAM (1.6 GB/s) and Flash (0.1 GB/s)
+[System_Config.Ethos_U55_Deep_Embedded]
+core_clock=200e6
+axi0_port=Sram
+axi1_port=OffChipFlash
+Sram_clock_scale=1.0
+Sram_burst_length=32
+Sram_read_latency=32
+Sram_write_latency=32
+OffChipFlash_clock_scale=0.0625
+OffChipFlash_burst_length=128
+OffChipFlash_read_latency=64
+OffChipFlash_write_latency=64
+
+; Ethos-U55 High-End Embedded: SRAM (4 GB/s) and Flash (0.5 GB/s)
+[System_Config.Ethos_U55_High_End_Embedded]
+core_clock=500e6
+axi0_port=Sram
+axi1_port=OffChipFlash
+Sram_clock_scale=1.0
+Sram_burst_length=32
+Sram_read_latency=32
+Sram_write_latency=32
+OffChipFlash_clock_scale=0.125
+OffChipFlash_burst_length=128
+OffChipFlash_read_latency=64
+OffChipFlash_write_latency=64
+
+; -----------------------------------------------------------------------------
+; Memory Mode
+
+; SRAM Only: only one AXI port is used and the SRAM is used for all storage
+[Memory_Mode.Sram_Only]
+const_mem_area=Axi0
+arena_mem_area=Axi0
+cache_mem_area=Axi0
+
+; Shared SRAM: the SRAM is shared between the Ethos-U and the Cortex-M software
+; The non-SRAM memory is assumed to be read-only
+[Memory_Mode.Shared_Sram]
+const_mem_area=Axi1
+arena_mem_area=Axi0
+cache_mem_area=Axi0
+
+; Dedicated SRAM: the SRAM (384KB) is only for use by the Ethos-U
+; The non-SRAM memory is assumed to be read-writeable
+[Memory_Mode.Dedicated_Sram]
+const_mem_area=Axi1
+arena_mem_area=Axi1
+cache_mem_area=Axi0
+cache_sram_size=393216
+
+; Dedicated SRAM 512KB: the SRAM (512KB) is only for use by the Ethos-U
+; The non-SRAM memory is assumed to be read-writeable
+[Memory_Mode.Dedicated_Sram_512KB]
+inherit=Memory_Mode.Dedicated_Sram
+cache_sram_size=524288
diff --git a/source/application/hal/hal.c b/source/application/hal/hal.c
new file mode 100644
index 0000000..dbf94ba
--- /dev/null
+++ b/source/application/hal/hal.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"            /* API */
+
+#include "hal_config.h"     /* HAL configuration */
+#include "system_init.h"
+
+#include <stdio.h>
+#include <assert.h>
+
+#if defined(ARM_NPU)
+
+#include "ethosu_driver.h"              /* Arm Ethos-U55 driver header */
+#include "timing_adapter.h"             /* Arm Ethos-U55 timing adapter driver header */
+#include "timing_adapter_settings.h"    /* Arm Ethos-U55 timing adapter settings */
+
+/**
+ * @brief   Initialises the Arm Ethos-U55 NPU
+ * @return  0 if successful, error code otherwise
+ **/
+static int _arm_npu_init(void);
+
+#endif /* ARM_NPU */
+
+int hal_init(hal_platform* platform, data_acq_module* data_acq,
+    data_psn_module* data_psn, platform_timer* timer)
+{
+    assert(platform && data_acq && data_psn);
+
+    platform->data_acq  = data_acq;
+    platform->data_psn  = data_psn;
+    platform->timer     = timer;
+    platform->platform_init     = system_init;
+    platform->platform_release  = system_release;
+    system_name(platform->plat_name, sizeof(platform->plat_name));
+
+    return 0;
+}
+
+/**
+ * @brief  Local helper function to reset the state of the current platform.
+ **/
+static void _hal_platform_clear(hal_platform* platform)
+{
+    assert(platform);
+    platform->inited = 0;
+}
+
+int hal_platform_init(hal_platform* platform)
+{
+    int state;
+    assert(platform && platform->platform_init);
+    _hal_platform_clear(platform);
+
+    /* Initialise platform */
+    if (0 != (state = platform->platform_init())) {
+        printf_err("failed to initialise platform %s\n", platform->plat_name);
+        return state;
+    }
+
+    /* Initialise the data acquisition module */
+    if (0 != (state = data_acq_channel_init(platform->data_acq))) {
+        if (!platform->data_acq->inited) {
+            printf_err("failed to initialise data acq module: %s\n",
+                platform->data_acq->system_name);
+        }
+        hal_platform_release(platform);
+        return state;
+    }
+
+    /* Initialise the presentation module */
+    if (0 != (state = data_psn_system_init(platform->data_psn))) {
+        printf_err("failed to initialise data psn module: %s\n",
+            platform->data_psn->system_name);
+        data_acq_channel_release(platform->data_acq);
+        hal_platform_release(platform);
+        return state;
+    }
+
+#if defined(ARM_NPU)
+
+    /* If Arm Ethos-U55 NPU is to be used, we initialise it here */
+    if (0 != (state = _arm_npu_init())) {
+        return state;
+    }
+
+#endif /* ARM_NPU */
+
+    /* followed by the timer module */
+    init_timer(platform->timer);
+
+    info("%s platform initialised\n", platform->plat_name);
+    debug("using %s module for data acquisition\n",
+            platform->data_acq->system_name);
+    debug("using %s module for data presentation\n",
+        platform->data_psn->system_name);
+
+    platform->inited = !state;
+
+    return state;
+}
+
+void hal_platform_release(hal_platform *platform)
+{
+    assert(platform && platform->platform_release);
+    data_acq_channel_release(platform->data_acq);
+    data_psn_system_release(platform->data_psn);
+
+    _hal_platform_clear(platform);
+    info("releasing platform %s\n", platform->plat_name);
+    platform->platform_release();
+}
+
+#if defined(ARM_NPU)
+/**
+ * @brief   Defines the Ethos-U interrupt handler: just a wrapper around the default
+ *          implementation.
+ **/
+static void _arm_npu_irq_handler(void)
+{
+    /* Call the default interrupt handler from the NPU driver */
+    ethosu_irq_handler();
+}
+
+/**
+ * @brief  Initialises the NPU IRQ
+ **/
+static void _arm_npu_irq_init(void)
+{
+    const IRQn_Type ethosu_irqnum = (IRQn_Type)EthosU_IRQn;
+
+    /* Register the EthosU IRQ handler in our vector table.
+     * Note, this handler comes from the EthosU driver */
+    NVIC_SetVector(ethosu_irqnum, (uint32_t)_arm_npu_irq_handler);
+
+    /* Enable the IRQ */
+    NVIC_EnableIRQ(ethosu_irqnum);
+
+    debug("EthosU IRQ#: %u, Handler: 0x%p\n",
+            ethosu_irqnum, _arm_npu_irq_handler);
+}
+
+static int _arm_npu_timing_adapter_init(void)
+{
+#if defined (TA0_BASE)
+    struct timing_adapter ta_0;
+    struct timing_adapter_settings ta_0_settings = {
+        .maxr = TA0_MAXR,
+        .maxw = TA0_MAXW,
+        .maxrw = TA0_MAXRW,
+        .rlatency = TA0_RLATENCY,
+        .wlatency = TA0_WLATENCY,
+        .pulse_on = TA0_PULSE_ON,
+        .pulse_off = TA0_PULSE_OFF,
+        .bwcap = TA0_BWCAP,
+        .perfctrl = TA0_PERFCTRL,
+        .perfcnt = TA0_PERFCNT,
+        .mode = TA0_MODE,
+        .maxpending = 0, /* This is a read-only parameter */
+        .histbin = TA0_HISTBIN,
+        .histcnt = TA0_HISTCNT
+    };
+
+    if (0 != ta_init(&ta_0, TA0_BASE)) {
+        printf_err("TA0 initialisation failed\n");
+        return 1;
+    }
+
+    ta_set_all(&ta_0, &ta_0_settings);
+#endif /* defined (TA0_BASE) */
+
+#if defined (TA1_BASE)
+    struct timing_adapter ta_1;
+    struct timing_adapter_settings ta_1_settings = {
+        .maxr = TA1_MAXR,
+        .maxw = TA1_MAXW,
+        .maxrw = TA1_MAXRW,
+        .rlatency = TA1_RLATENCY,
+        .wlatency = TA1_WLATENCY,
+        .pulse_on = TA1_PULSE_ON,
+        .pulse_off = TA1_PULSE_OFF,
+        .bwcap = TA1_BWCAP,
+        .perfctrl = TA1_PERFCTRL,
+        .perfcnt = TA1_PERFCNT,
+        .mode = TA1_MODE,
+        .maxpending = 0, /* This is a read-only parameter */
+        .histbin = TA1_HISTBIN,
+        .histcnt = TA1_HISTCNT
+    };
+
+    if (0 != ta_init(&ta_1, TA1_BASE)) {
+        printf_err("TA1 initialisation failed\n");
+        return 1;
+    }
+
+    ta_set_all(&ta_1, &ta_1_settings);
+#endif /* defined (TA1_BASE) */
+
+    return 0;
+}
+
+static int _arm_npu_init(void)
+{
+    int err = 0;
+
+    /* If the platform has timing adapter blocks along with Ethos-U55 core
+     * block, initialise them here. */
+    if (0 != (err = _arm_npu_timing_adapter_init())) {
+        return err;
+    }
+
+    /* Initialise the IRQ */
+    _arm_npu_irq_init();
+
+    /* Initialise Ethos-U55 device */
+    const void * ethosu_base_address = (void *)(SEC_ETHOS_U55_BASE);
+
+    if (0 != (err = ethosu_init_v3(
+                        ethosu_base_address,    /* Ethos-U55's base address. */
+                        NULL,                   /* Pointer to fast mem area - NULL for U55. */
+                        0,                      /* Fast mem region size. */
+                        1,                      /* Security enable. */
+                        1))) {                  /* Privilege enable. */
+        printf_err("failed to initalise Ethos-U55 device\n");
+        return err;
+    }
+
+    info("Ethos-U55 device initialised\n");
+
+    /* Get Ethos-U55 version */
+    struct ethosu_version version;
+    if (0 != (err = ethosu_get_version(&version))) {
+        printf_err("failed to fetch Ethos-U55 version info\n");
+        return err;
+    }
+
+    info("Ethos-U55 version info:\n");
+    info("\tArch:       v%u.%u.%u\n", version.id.arch_major_rev,
+                                    version.id.arch_minor_rev,
+                                    version.id.arch_patch_rev);
+    info("\tDriver:     v%u.%u.%u\n", version.id.driver_major_rev,
+                                    version.id.driver_minor_rev,
+                                    version.id.driver_patch_rev);
+    info("\tMACs/cc:    %u\n", (1 << version.cfg.macs_per_cc));
+    info("\tCmd stream: v%u\n", version.cfg.cmd_stream_version);
+    info("\tSHRAM size: %u\n", version.cfg.shram_size);
+
+    return 0;
+}
+#endif /* ARM_NPU */
diff --git a/source/application/hal/include/data_acq.h b/source/application/hal/include/data_acq.h
new file mode 100644
index 0000000..965fbe5
--- /dev/null
+++ b/source/application/hal/include/data_acq.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef DATA_ACQ_H
+#define DATA_ACQ_H
+
+/**
+ * This file is the top level abstraction for the data acquisition module.
+ **/
+#include <stdint.h>
+
+/* Structure to encompass the data acquisition module and its methods. */
+typedef struct data_acquisition_module {
+    int inited;                 /**< initialised or not. */
+    char system_name[8];        /**< name(s) of the channel in use. */
+    int (* system_init)(void);  /**< channel initialisation function. */
+
+    /* Function to check whether there are any input events that require handling. */
+    int (* get_input)(char *user_input, int size);
+} data_acq_module;
+
+/**
+ * @brief           Initialises the data acquisition channel and sets the
+ *                  required channel up for use.
+ * @param[in,out]   module  Pointer to a pre-allocated data
+ *                          acquisition structure object.
+ * @return          0 if successful, error code otherwise.
+ **/
+int data_acq_channel_init(data_acq_module *module);
+
+/**
+ * @brief           Releases the data acquisition channel.
+ * @param[in,out]   module  Pointer to a pre-allocated data
+ *                          acquisition structure object.
+ * @return          0 if successful, error code otherwise.
+ **/
+int data_acq_channel_release(data_acq_module *module);
+
+#endif /* DATA_ACQ_H */
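To show how the structure members are expected to be wired, here is a minimal sketch of a data acquisition channel implementation. The "stub" channel is hypothetical; real channels (e.g. UART input) live in the platform sources, and the 0-on-success convention follows the documentation above.

    #include "data_acq.h"
    #include <string.h>

    static int stub_channel_init(void)
    {
        return 0; /* nothing to set up */
    }

    static int stub_get_input(char *user_input, int size)
    {
        if (!user_input || size < 2) {
            return 1;
        }
        user_input[0] = '\n';   /* pretend the user just pressed return */
        user_input[1] = '\0';
        return 0;
    }

    int data_acq_channel_init(data_acq_module *module)
    {
        strncpy(module->system_name, "stub", sizeof(module->system_name));
        module->system_init = stub_channel_init;
        module->get_input   = stub_get_input;

        module->inited = (0 == module->system_init());
        return module->inited ? 0 : 1;
    }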
diff --git a/source/application/hal/include/data_psn.h b/source/application/hal/include/data_psn.h
new file mode 100644
index 0000000..8c14c77
--- /dev/null
+++ b/source/application/hal/include/data_psn.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef DATA_PSN_H
+#define DATA_PSN_H
+
+/**
+ * This file is the top level abstraction for the data presentation module
+ **/
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+/* Structure to encompass the data presentation module and its methods */
+typedef struct data_presentation_module {
+    int inited;                 /**< initialised or not */
+    char system_name[8];        /**< name of the system in use */
+    int (* system_init)(void);  /**< pointer to init function */
+
+    /** Pointer to the image presentation function */
+    int (* present_data_image)(uint8_t *data, const uint32_t width,
+        const uint32_t height, const uint32_t channels,
+        const uint32_t pos_x, const uint32_t pos_y,
+        const uint32_t downsample_factor);
+
+    /* Pointer to text presentation function */
+    int (* present_data_text)(const char *str, const size_t str_sz,
+        const uint32_t pos_x, const uint32_t pos_y,
+        const bool allow_multiple_lines);
+
+    /* Pointer to box presentation function */
+    int (* present_box)(const uint32_t pos_x, const uint32_t pos_y,
+        const uint32_t width, const uint32_t height, const uint16_t color);
+
+    /* Pointer to clear presentation function */
+    int (* clear)(const uint16_t color);
+
+    /* Pointer to set text color presentation function */
+    int (* set_text_color)(const uint16_t color);
+} data_psn_module;
+
+
+/**
+ * @brief           Initialises the data presentation system.
+ * @param[in,out]   module  Pointer to a pre-allocated data
+ *                          presentation structure object.
+ * @return          0 if successful, error code otherwise.
+ **/
+int data_psn_system_init(data_psn_module *module);
+
+/**
+ * @brief           Releases the data presentation system.
+ * @param[in,out]   module  Pointer to a pre-allocated data
+ *                          presentation structure object.
+ * @return          0 if successful, error code otherwise.
+ **/
+int data_psn_system_release(data_psn_module *module);
+
+#endif /* DATA_PSN_H */
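Once data_psn_system_init() has populated the structure, the application drives the display purely through the function pointers above. A short usage sketch (the colour values and banner text are illustrative):

    #include "data_psn.h"

    static void show_banner(data_psn_module *psn)
    {
        const char banner[] = "ML eval kit";
        psn->clear(0x0000);                         /* clear the screen to black */
        psn->set_text_color(0xFFFF);                /* white text */
        psn->present_data_text(banner, sizeof(banner) - 1,
                               10, 10, false);      /* x=10, y=10, single line */
    }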
diff --git a/source/application/hal/include/hal.h b/source/application/hal/include/hal.h
new file mode 100644
index 0000000..26ba1e3
--- /dev/null
+++ b/source/application/hal/include/hal.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef PLATFORM_HAL_H
+#define PLATFORM_HAL_H
+
+/**
+ * This file presents a C API for the main application logic to use,
+ * independent of the lower level platform. It also needs to be aware of
+ * the APIs exposed by the data acquisition and data presentation modules.
+ */
+#include "hal_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "data_acq.h"                   /* Data acquisition abstraction */
+#include "data_psn.h"                   /* Data presentation abstraction */
+#include "timer.h"                      /* Timer/profiler API */
+
+/* Structure to define a platform context to be used by the application */
+typedef struct hal_platform_context {
+    int inited;                         /**< initialised */
+    char plat_name[16];                 /**< name of this platform */
+    data_acq_module * data_acq;         /**< data acquisition module pointer */
+    data_psn_module * data_psn;         /**< data presentation module pointer */
+    platform_timer *  timer;            /**< timer */
+    int (* platform_init)();            /**< pointer to platform initialisation function */
+    void (* platform_release)();        /**< pointer to platform release function */
+} hal_platform;
+
+/**
+ * @brief           Initialise the HAL structure based on compile time config. This
+ *                  should be called before any other function in this API.
+ * @param[in,out]   platform    Pointer to a pre-allocated platform struct.
+ * @param[in,out]   data_acq    Pointer to a pre-allocated data acquisition module.
+ * @param[in,out]   data_psn    Pointer to a pre-allocated data presentation module.
+ * @param[in,out]   timer       Pointer to a pre-allocated timer module.
+ * @return          0 if successful, error code otherwise.
+ **/
+int hal_init(hal_platform *platform, data_acq_module *data_acq,
+    data_psn_module *data_psn, platform_timer *timer);
+
+
+/**
+ * @brief       Initialise the HAL platform. This initialises all the platform
+ *              modules that the application requires to run.
+ * @param[in]   platform    Pointer to a pre-allocated and initialised
+ *                          platform structure.
+ * @return      0 if successful, error code otherwise.
+ **/
+int hal_platform_init(hal_platform *platform);
+
+
+/**
+ * @brief       Release the HAL platform. This should release resources acquired.
+ * @param[in]   platform    pointer to a pre-allocated and initialised
+ *                          platform structure.
+ **/
+void hal_platform_release(hal_platform *platform);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PLATFORM_HAL_H */
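A minimal application bring-up sketch using the API above; the concrete data acquisition, presentation and timer implementations are supplied by whichever platform the image is built for.

    #include "hal.h"

    int main(void)
    {
        hal_platform platform;
        data_acq_module data_acq;
        data_psn_module data_psn;
        platform_timer timer;

        /* Wire up the platform description from compile-time configuration. */
        hal_init(&platform, &data_acq, &data_psn, &timer);

        /* Bring up all modules; bail out if anything fails. */
        if (0 != hal_platform_init(&platform)) {
            return 1;
        }

        /* ... application logic goes here ... */

        hal_platform_release(&platform);
        return 0;
    }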
diff --git a/source/application/hal/include/hal_config.h b/source/application/hal/include/hal_config.h
new file mode 100644
index 0000000..55db973
--- /dev/null
+++ b/source/application/hal/include/hal_config.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HAL_CONFIG_H
+#define HAL_CONFIG_H
+
+/* This header provides some basic configuration for HAL */
+
+/* Platform definitions for the systems we expect to support */
+#define PLATFORM_CORTEX_M_BAREMETAL 1U
+#define PLATFORM_UNKNOWN_LINUX_OS   3U
+
+/* This should come from compile time definition */
+#ifndef PLATFORM_HAL
+    #define PLATFORM_HAL    PLATFORM_UNKNOWN_LINUX_OS    /* Default platform */
+#endif /* PLATFORM_HAL */
+
+#if ((PLATFORM_HAL) == PLATFORM_CORTEX_M_BAREMETAL)
+    #include "bsp.h"
+#elif ((PLATFORM_HAL) == PLATFORM_UNKNOWN_LINUX_OS)
+    #include "dummy_log.h"
+#else
+    #error "Invalid platform!"
+#endif /* PLATFORM_HAL==PLATFORM_CORTEX_M_BAREMETAL */
+
+#if !defined (DESIGN_NAME)
+    #define DESIGN_NAME   ("N/A")
+#endif /* !defined (DESIGN_NAME) */
+
+#endif /* HAL_CONFIG_H */
diff --git a/source/application/hal/include/timer.h b/source/application/hal/include/timer.h
new file mode 100644
index 0000000..2955b7f
--- /dev/null
+++ b/source/application/hal/include/timer.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HAL_TIMER_H
+#define HAL_TIMER_H
+
+#include "hal_config.h"
+
+#if ((PLATFORM_HAL) == PLATFORM_CORTEX_M_BAREMETAL)
+#include "baremetal_timer.h"
+#elif ((PLATFORM_HAL) == PLATFORM_UNKNOWN_LINUX_OS)
+#include "native_timer.h"
+#else
+#error "Platform does not support a timer API"
+#endif /* PLATFORM_HAL */
+
+/** Struct for describing the capabilities available for
+ * the timer provided by HAL */
+typedef struct _platform_timer_capability {
+    uint32_t npu_cycles:    1;
+    uint32_t cpu_cycles:    1;
+    uint32_t duration_ms:   1;
+    uint32_t duration_us:   1;
+} timer_capability;
+
+/* Structure to hold a platform specific timer implementation */
+typedef struct _platform_timer {
+    int inited;                 /**< initialised or not */
+    timer_capability cap;       /**< capability of this timer */
+
+    /* reset the timer */
+    void (* reset)(void);
+
+    /* Gets the current time counter. */
+    time_counter (* get_time_counter)(void);
+
+    /* Gets the duration in milliseconds. */
+    time_t (* get_duration_ms)(time_counter *start, time_counter *end);
+
+    /* Gets duration in microseconds. */
+    time_t (* get_duration_us)(time_counter *start, time_counter *end);
+
+    /* Gets difference in CPU cycle counts. */
+    uint32_t (* get_cpu_cycle_diff)(time_counter *start, time_counter *end);
+
+    /* Gets the difference in terms of total NPU cycle counts. */
+    uint64_t (* get_npu_total_cycle_diff)(time_counter *start, time_counter *end);
+
+    /* Gets the difference in terms of active NPU cycle counts. */
+    uint64_t (* get_npu_active_cycle_diff)(time_counter *start, time_counter *end);
+
+    /* Wraps get_time_counter function with additional profiling
+     * initialisation, if required. */
+    time_counter (* start_profiling)(void);
+
+    /* Wraps get_time_counter function along with additional instructions when
+     * profiling ends, if required. */
+    time_counter (* stop_profiling)(void);
+
+} platform_timer;
+
+/**
+ * @brief   Initialise the timer available for the platform.
+ **/
+void init_timer(platform_timer *timer);
+
+#endif /* HAL_TIMER_H */
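A profiling sketch using the structure above, assuming init_timer() has already populated it. The capability bits gate which counters are meaningful on the current platform.

    #include "timer.h"
    #include <stdio.h>

    static void profile_workload(platform_timer *timer, void (*workload)(void))
    {
        time_counter start = timer->start_profiling();
        workload();
        time_counter end = timer->stop_profiling();

        if (timer->cap.duration_ms) {
            printf("duration: %ld ms\n",
                   (long)timer->get_duration_ms(&start, &end));
        }
        if (timer->cap.npu_cycles) {
            printf("NPU active cycles: %llu\n",
                   (unsigned long long)timer->get_npu_active_cycle_diff(&start, &end));
        }
    }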
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-core/include/bsp_core_log.h b/source/application/hal/platforms/bare-metal/bsp/bsp-core/include/bsp_core_log.h
new file mode 100644
index 0000000..f049209
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-core/include/bsp_core_log.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef BSP_CORE_LOG_H
+#define BSP_CORE_LOG_H
+
+#include "uart_stdout.h"    /* UART for logging */
+
+#include <stdio.h>
+
+#define LOG_LEVEL_TRACE       0
+#define LOG_LEVEL_DEBUG       1
+#define LOG_LEVEL_INFO        2
+#define LOG_LEVEL_WARN        3
+#define LOG_LEVEL_ERROR       4
+
+#ifndef LOG_LEVEL
+#define LOG_LEVEL             LOG_LEVEL_INFO
+#endif /*LOG_LEVEL*/
+
+#if (LOG_LEVEL == LOG_LEVEL_TRACE)
+    #define trace(...)        printf("[TRACE] "); printf(__VA_ARGS__)
+#else
+    #define trace(...)
+#endif  /* LOG_LEVEL == LOG_LEVEL_TRACE */
+
+#if (LOG_LEVEL <= LOG_LEVEL_DEBUG)
+    #define debug(...)        printf("[DEBUG] "); printf(__VA_ARGS__)
+#else
+    #define debug(...)
+#endif  /* LOG_LEVEL <= LOG_LEVEL_DEBUG */
+
+#if (LOG_LEVEL <= LOG_LEVEL_INFO)
+    #define info(...)         printf("[INFO] "); printf(__VA_ARGS__)
+#else
+    #define info(...)
+#endif  /* LOG_LEVEL <= LOG_LEVEL_INFO */
+
+#if (LOG_LEVEL <= LOG_LEVEL_WARN)
+    #define warn(...)         printf("[WARN] "); printf(__VA_ARGS__)
+#else
+    #define warn(...)
+#endif  /* LOG_LEVEL <= LOG_LEVEL_WARN */
+
+#if (LOG_LEVEL <= LOG_LEVEL_ERROR)
+    #define printf_err(...)   printf("[ERROR] "); printf(__VA_ARGS__)
+#else
+    #define printf_err(...)
+#endif  /* LOG_LEVEL <= LOG_LEVEL_ERROR */
+
+#define UNUSED(x)       ((void)(x))
+
+#endif /* BSP_CORE_LOG_H */
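A short usage note for the macros above: each enabled macro expands to two printf statements, so conditional logging should always be wrapped in braces.

    #include "bsp_core_log.h"

    static void report_state(int ok, int value)
    {
        info("value read: %d\n", value);

        if (!ok) {   /* braces matter: printf_err() expands to two statements */
            printf_err("read failed for value %d\n", value);
        }
    }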
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-core/include/uart_stdout.h b/source/application/hal/platforms/bare-metal/bsp/bsp-core/include/uart_stdout.h
new file mode 100644
index 0000000..9c5fbcf
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-core/include/uart_stdout.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef UART_STDOUT_H
+#define UART_STDOUT_H
+
+#include <stdbool.h>
+
+/**
+ * @brief       Initialises the UART block.
+ **/
+extern void UartStdOutInit(void);
+
+/**
+ * @brief       Transmits a character over UART (blocking call).
+ * @param[in]   my_ch Character to be transmitted.
+ * @return      Character transmitted.
+ **/
+extern unsigned char UartPutc(unsigned char my_ch);
+
+/**
+ * @brief       Receives a character from the UART block (blocking call).
+ * @return      Character received.
+ **/
+extern unsigned char UartGetc(void);
+
+/**
+ * @brief       Reads characters from the UART block until a line feed or
+ *              carriage return character is received. A NULL character
+ *              terminates the read and an error is returned.
+ * @param[out]  lp      Buffer to receive the characters read from the UART block.
+ * @param[in]   len     Size of the buffer in bytes.
+ * @return      true if successful, false otherwise.
+ **/
+extern bool GetLine(char *lp, unsigned int len);
+
+/**
+ * @brief       Terminates UART simulation. This is useful when a Fixed
+ *              Virtual Platform's session needs to be gracefully terminated.
+ * @param[in]   code Terminating code displayed on the UART before the end of the simulation.
+ **/
+extern void UartEndSimulation(int code);
+
+#endif /* UART_STDOUT_H */
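A small sketch of the API above: echo one line received over the UART.

    #include "uart_stdout.h"
    #include <stdio.h>

    int echo_one_line(void)
    {
        char line[64];

        UartStdOutInit();
        if (!GetLine(line, sizeof(line))) {
            return -1;   /* NULL character received or read error */
        }
        printf("echo: %s\n", line);
        return 0;
    }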
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-core/retarget.c b/source/application/hal/platforms/bare-metal/bsp/bsp-core/retarget.c
new file mode 100644
index 0000000..cf31a53
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-core/retarget.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+
+#include "uart_stdout.h"
+#include "bsp_core_log.h"
+
+#if defined (MPS3_PLATFORM)
+#include "smm_mps3.h"
+#endif  /* MPS3_PLATFORM */
+
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <rt_misc.h>
+#include <rt_sys.h>
+
+
+/* Standard IO device handles. */
+#define STDIN   0x8001
+#define STDOUT  0x8002
+#define STDERR  0x8003
+
+/* Standard IO device name defines. */
+const char __stdin_name[]   = "STDIN";
+const char __stdout_name[]  = "STDOUT";
+const char __stderr_name[]  = "STDERR";
+
+int fputc(int ch, FILE *f)
+{
+    UNUSED(f);
+    return (UartPutc(ch));
+}
+
+int fgetc(FILE *f)
+{
+    UNUSED(f);
+    return (UartPutc(UartGetc()));
+}
+
+int ferror(FILE *f)
+{
+    UNUSED(f);
+    /* Your implementation of ferror */
+    return EOF;
+}
+
+void _ttywrch(int ch)
+{
+    UartPutc(ch);
+}
+
+FILEHANDLE _sys_open(const char *name, int openmode)
+{
+    UNUSED(openmode);
+
+    /* Register standard Input Output devices. */
+    if (strcmp(name, "STDIN") == 0)
+    {
+        return (STDIN);
+    }
+    if (strcmp(name, "STDOUT") == 0)
+    {
+        return (STDOUT);
+    }
+    if (strcmp(name, "STDERR") == 0)
+    {
+        return (STDERR);
+    }
+    return (-1);
+}
+
+int _sys_close(FILEHANDLE fh)
+{
+    if (fh > 0x8000)
+    {
+        return (0);
+    }
+    return (-1);
+}
+
+int _sys_write(FILEHANDLE fh, const unsigned char *buf, unsigned int len, int mode)
+{
+    UNUSED(mode);
+    if (fh == STDOUT || fh == STDERR )
+    {
+        /* Standard Output device. */
+        for (; len; len--)
+        {
+            UartPutc(*buf++);
+        }
+        return (0);
+    }
+
+    if (fh > 0x8000)
+    {
+        return (-1);
+    }
+    return (-1);
+}
+
+int _sys_read(FILEHANDLE fh, unsigned char *buf, unsigned int len, int mode)
+{
+    UNUSED(mode);
+    if (fh == STDIN)
+    {
+        /* Standard Input device. */
+        for (; len; len--)
+        {
+            *buf++ = UartGetc();
+        }
+        return (0);
+    }
+
+    if (fh > 0x8000)
+    {
+        return (-1);
+    }
+    return (-1);
+}
+
+int _sys_istty(FILEHANDLE fh)
+{
+    if (fh > 0x8000)
+    {
+        return (1);
+    }
+    return (0);
+}
+
+int _sys_seek(FILEHANDLE fh, long pos)
+{
+    UNUSED(pos);
+    if (fh > 0x8000)
+    {
+        return (-1);
+    }
+    return (-1);
+}
+
+int _sys_ensure(FILEHANDLE fh)
+{
+    if (fh > 0x8000)
+    {
+        return (-1);
+    }
+    return (-1);
+}
+
+long _sys_flen(FILEHANDLE fh)
+{
+    if (fh > 0x8000)
+    {
+        return (0);
+    }
+    return (-1);
+}
+
+int _sys_tmpnam(char *name, int sig, unsigned maxlen)
+{
+    UNUSED(name);
+    UNUSED(sig);
+    UNUSED(maxlen);
+    return (1);
+}
+
+char *_sys_command_string(char *cmd, int len)
+{
+    UNUSED(len);
+    return (cmd);
+}
+
+void _sys_exit(int return_code)
+{
+    UartEndSimulation(return_code);
+}
+
+int system(const char *cmd)
+{
+    UNUSED(cmd);
+    return (0);
+}
+
+time_t time(time_t *timer)
+{
+    time_t current;
+
+#if defined (MPS3_PLATFORM)
+    current = MPS3_FPGAIO->COUNTER;
+#else   /* MPS3_PLATFORM */
+    current  = 0;   /* No RTC implementation available. */
+#endif  /* MPS3_PLATFORM */
+
+    if (timer != NULL) {
+        *timer = current;
+    }
+
+    return (current);
+}
+
+#else   /* #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) */
+
+/******************************************************************************/
+/* Retarget functions for GNU Tools for ARM Embedded Processors               */
+/******************************************************************************/
+#include <stdio.h>
+#include <sys/stat.h>
+
+extern unsigned char UartPutc(unsigned char my_ch);
+
+__attribute__((used)) int _write(int fd, char *ptr, int len)
+{
+    size_t i;
+    for (i = 0; i < len; i++)
+    {
+        UartPutc(ptr[i]); /* call character output function. */
+    }
+    return len;
+}
+
+#endif /* #if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) */
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/device_mps3.c b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/device_mps3.c
new file mode 100644
index 0000000..f4f2e6b
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/device_mps3.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "device_mps3.h"
+
+#include "bsp_core_log.h"
+#include "smm_mps3.h"
+
+uint32_t GetMPS3CoreClock(void)
+{
+    const uint32_t default_clock = 32000000;
+    static int warned_once = 0;
+    if (0 != MPS3_SCC->CFG_ACLK) {
+        return MPS3_SCC->CFG_ACLK;
+    }
+
+    if (!warned_once) {
+        warn("MPS3_SCC->CFG_ACLK reads 0. Assuming default clock of %u\n",
+            default_clock);
+        warned_once = 1;
+    }
+    return default_clock;
+}
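A sketch of a typical consumer of GetMPS3CoreClock(): deriving a UART baud-rate divider. Whether UART0 is actually clocked at this rate is a platform assumption made only for illustration; the BSP's UART driver is authoritative.

    #include "device_mps3.h"

    void uart0_set_baud(uint32_t baud_rate)
    {
        /* CMSDK UART divider = input clock / desired baud rate (assumption:
         * UART0 is fed from the reported core clock). */
        CMSDK_UART0->BAUDDIV = GetMPS3CoreClock() / baud_rate;
    }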
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/glcd_mps3.c b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/glcd_mps3.c
new file mode 100644
index 0000000..530be4f
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/glcd_mps3.c
@@ -0,0 +1,460 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "glcd_mps3.h"
+
+#include "bsp_core_log.h"
+#include "font_9x15_h.h"
+#include "smm_mps3.h"
+
+/*-------------- CLCD Controller Internal Register addresses ----------------*/
+#define CHAR_COM        ((volatile unsigned int *)(CLCD_CONFIG_BASE + 0x000))
+#define CHAR_DAT        ((volatile unsigned int *)(CLCD_CONFIG_BASE + 0x004))
+#define CHAR_RD         ((volatile unsigned int *)(CLCD_CONFIG_BASE + 0x008))
+#define CHAR_RAW        ((volatile unsigned int *)(CLCD_CONFIG_BASE + 0x00C))
+#define CHAR_MASK       ((volatile unsigned int *)(CLCD_CONFIG_BASE + 0x010))
+#define CHAR_STAT       ((volatile unsigned int *)(CLCD_CONFIG_BASE + 0x014))
+#define CHAR_MISC       ((volatile unsigned int *)(CLCD_CONFIG_BASE + 0x04C))
+
+/*--------------- Graphic LCD interface hardware definitions -----------------*/
+/* Pin CS setting to 0 or 1                                                   */
+#define LCD_CS(x)   ((x) ? (*CHAR_MISC |= CLCD_CS_Msk)    : (*CHAR_MISC &= ~CLCD_CS_Msk))
+#define LCD_RST(x)  ((x) ? (*CHAR_MISC |= CLCD_RESET_Msk) : (*CHAR_MISC &= ~CLCD_RESET_Msk))
+#define LCD_BL(x)   ((x) ? (*CHAR_MISC |= CLCD_BL_Msk)    : (*CHAR_MISC &= ~CLCD_BL_Msk))
+
+#define BG_COLOR  0                     /* Background colour                  */
+#define TXT_COLOR 1                     /* Text colour                        */
+
+/**
+* Text and background colour
+*/
+static volatile unsigned short Color[2] = {Black, White};
+
+/**
+ * @brief     Delay in while loop cycles.
+ * @param[in] cnt    Number of while cycles to delay.
+ **/
+static void delay (int cnt)
+{
+    cnt <<= DELAY_2N;
+    while (cnt != 0) {
+        --cnt;
+    }
+}
+
+/**
+ * @brief       Write a command to the LCD controller.
+ * @param[in]   cmd    Command to be written.
+ */
+static __inline void wr_cmd(unsigned char cmd)
+{
+    LCD_CS(0);
+    *CHAR_COM = cmd;
+    LCD_CS(1);
+}
+
+/**
+ * @brief       Start of data writing to the LCD controller.
+ */
+static __inline void wr_dat_start (void)
+{
+    LCD_CS(0);
+}
+
+/**
+ * @brief       Stop of data writing to the LCD controller.
+ */
+static __inline void wr_dat_stop (void)
+{
+    LCD_CS(1);
+}
+
+/**
+ * @brief       Data writing to the LCD controller.
+ * @param[in]   dat    Data to be written.
+ */
+static __inline void wr_dat_only(unsigned short dat)
+{
+    *CHAR_DAT = (dat >>   8);   /* Write D8..D15    */
+    *CHAR_DAT = (dat & 0xFF);   /* Write D0..D7     */
+}
+
+/**
+ * @brief       Write a value to the LCD register.
+ * @param[in]   reg    Register to be written.
+ * @param[in]   val    Value to write to the register.
+ */
+static __inline void wr_reg(unsigned char reg, unsigned short val)
+{
+    LCD_CS(0);
+    *CHAR_COM = reg;
+    wr_dat_only(val);
+    LCD_CS(1);
+}
+
+/**
+ * @brief       Converts a gray value to RGB565 representation.
+ * @param[in]   src_uchar   Pointer to the source pixel.
+ * @return      16 bit RGB565 value.
+ */
+static inline uint16_t _GLCD_Gray8_to_RGB565(uint8_t *src_uchar)
+{
+    uint16_t val_r = (*src_uchar >> 3);
+    uint16_t val_g = (*src_uchar >> 2);
+    return ((val_r << 11) | (val_g << 5) | val_r);
+}
+
+/**
+ * @brief       Converts an RGB888 value to RGB565 representation.
+ * @param[in]   src_uchar   Pointer to the source pixel for R (assumed to
+ *                          be RGB format).
+ * @return      16 bit RGB565 value.
+ */
+static inline uint16_t _GLCD_RGB888_to_RGB565(uint8_t *src_uchar)
+{
+    uint16_t val_r = (*src_uchar >> 3) & 0x1F;
+    uint16_t val_g = (*(src_uchar+1) >> 2) & 0x3F;
+    uint16_t val_b = (*(src_uchar+2) >> 3) & 0x1F;
+    return ((val_r << 11) | (val_g << 5) | val_b);
+}
+
+/* Helper typedef to encapsulate the colour conversion function
+ * signatures */
+typedef uint16_t (* std_clr_2_lcd_clr_fn)(uint8_t *src_uchar);
+
+void GLCD_SetWindow(unsigned int x, unsigned int y, unsigned int w, unsigned int h) {
+    unsigned int xe, ye;
+
+    xe = x+w-1;
+    ye = y+h-1;
+
+    wr_reg(0x02, x  >>    8);   /* Column address start MSB           */
+    wr_reg(0x03, x  &  0xFF);   /* Column address start LSB           */
+    wr_reg(0x04, xe >>    8);   /* Column address end MSB             */
+    wr_reg(0x05, xe &  0xFF);   /* Column address end LSB             */
+
+    wr_reg(0x06, y  >>    8);   /* Row address start MSB              */
+    wr_reg(0x07, y  &  0xFF);   /* Row address start LSB              */
+    wr_reg(0x08, ye >>    8);   /* Row address end MSB                */
+    wr_reg(0x09, ye &  0xFF);   /* Row address end LSB                */
+}
+
+void GLCD_WindowMax(void)
+{
+    GLCD_SetWindow (0, 0, GLCD_WIDTH, GLCD_HEIGHT);
+}
+
+void GLCD_SetTextColor(unsigned short color)
+{
+    Color[TXT_COLOR] = color;
+}
+
+void GLCD_SetBackColor(unsigned short color)
+{
+    Color[BG_COLOR] = color;
+}
+
+void GLCD_Clear(unsigned short color)
+{
+    unsigned int i;
+
+    GLCD_WindowMax();
+    wr_cmd(0x22);
+    wr_dat_start();
+
+    for(i = 0; i < (GLCD_WIDTH*GLCD_HEIGHT); ++i) {
+        wr_dat_only(color);
+    }
+    wr_dat_stop();
+}
+
+
+void GLCD_DrawChar(
+        unsigned int x, unsigned int y,
+        unsigned int cw, unsigned int ch,
+        unsigned char *c)
+{
+    unsigned int i, j, k, pixs;
+
+    /* Sanity check: out of bounds? */
+    if ((x + cw) > GLCD_WIDTH || (y + ch) > GLCD_HEIGHT) {
+        return;
+    }
+
+    GLCD_SetWindow(x, y, cw, ch);
+
+    wr_cmd(0x22);
+    wr_dat_start();
+
+    k  = (cw + 7)/8;
+
+    if (k == 1) {
+        for (j = 0; j < ch; ++j) {
+            pixs = *(unsigned char  *)c;
+            c += 1;
+
+            for (i = 0; i < cw; ++i) {
+                wr_dat_only (Color[(pixs >> i) & 1]);
+            }
+        }
+    }
+    else if (k == 2) {
+        for (j = 0; j < ch; ++j) {
+            pixs = *(unsigned short *)c;
+            c += 2;
+
+            for (i = 0; i < cw; ++i) {
+                wr_dat_only (Color[(pixs >> i) & 1]);
+            }
+        }
+    }
+    wr_dat_stop();
+}
+
+void GLCD_DisplayChar(
+        unsigned int ln, unsigned int col,
+        unsigned char fi, unsigned char c)
+{
+    c -= 32;
+    switch (fi) {
+        case 0: /* Font 9 x 15. */
+            GLCD_DrawChar(col * 9, ln * 15, 9, 15,
+                         (unsigned char *)&Font_9x15_h[c * 15]);
+            break;
+    }
+}
+
+void GLCD_DisplayString(
+        unsigned int ln, unsigned int col,
+        unsigned char fi, char *s)
+{
+  while (*s) {
+    GLCD_DisplayChar(ln, col++, fi, *s++);
+  }
+}
+
+
+
+void GLCD_ClearLn(unsigned int ln, unsigned char fi)
+{
+    unsigned char i;
+    char buf[60];
+
+    GLCD_WindowMax();
+    switch (fi) {
+        case 0:  /* Font 9x15*/
+            for (i = 0; i < (GLCD_WIDTH+8)/9; ++i) {
+                buf[i] = ' ';
+            }
+            buf[i] = 0;  /* terminate the blank string */
+            break;
+    }
+    GLCD_DisplayString (ln, 0, fi, buf);
+}
+
+void GLCD_Bitmap(unsigned int x, unsigned int y,
+        unsigned int w, unsigned int h,
+        unsigned short *bitmap)
+{
+    unsigned int i;
+    unsigned short *bitmap_ptr = bitmap;
+
+    GLCD_SetWindow (x, y, w, h);
+
+    wr_cmd(0x22);
+    wr_dat_start();
+
+    for (i = 0; i < (w*h); ++i) {
+        wr_dat_only (bitmap_ptr[i]);
+    }
+    wr_dat_stop();
+}
+
+void GLCD_Image(void *data, const uint32_t width,
+    const uint32_t height, const uint32_t channels,
+    const uint32_t pos_x, const uint32_t pos_y,
+    const uint32_t downsample_factor)
+{
+    uint32_t i, j = 0; /* for loops */
+    const uint32_t x_incr = channels * downsample_factor; /* stride. */
+    const uint32_t y_incr = channels * width * (downsample_factor - 1); /* skip rows. */
+    uint8_t* src_unsigned = (uint8_t *)data; /* temporary pointer. */
+    std_clr_2_lcd_clr_fn cvt_clr_fn = 0; /* colour conversion function. */
+
+    /* Based on number of channels, we decide which of the above functions to use. */
+    switch (channels) {
+        case 1:
+            cvt_clr_fn = _GLCD_Gray8_to_RGB565;
+            break;
+
+        case 3:
+            cvt_clr_fn = _GLCD_RGB888_to_RGB565;
+            break;
+
+        default:
+            printf_err("number of channels not supported by display\n");
+            return;
+    }
+
+    /* Set the expected window position. Note: this is integer division. */
+    GLCD_SetWindow(pos_x, pos_y,
+        width/downsample_factor, height/downsample_factor);
+    wr_cmd(0x22);
+    wr_dat_start();
+
+    /* Loop over the image. */
+    for (j = height; j != 0; j -= downsample_factor) {
+        for (i = width; i != 0; i -= downsample_factor) {
+            wr_dat_only(cvt_clr_fn(src_unsigned));
+            src_unsigned += x_incr;
+        }
+
+        /* Skip rows if needed. */
+        src_unsigned += y_incr;
+    }
+
+    wr_dat_stop();
+}
+
+void GLCD_Box(
+        unsigned int x, unsigned int y,
+        unsigned int w, unsigned int h,
+        unsigned short color)
+{
+    unsigned int i;
+
+    GLCD_SetWindow (x, y, w, h);
+
+    wr_cmd(0x22);
+    wr_dat_start();
+    for(i = 0; i < (w*h); ++i){
+        wr_dat_only (color);
+    }
+    wr_dat_stop();
+}
+
+
+void GLCD_Initialize (void)
+{
+    /* CLCD screen setup (Default CLCD screen interface state) ------------- */
+    LCD_CS(1);              /* deassert nCS0. */
+    LCD_RST(1);             /* deassert Reset. */
+    LCD_BL(0);              /* switch off backlight. */
+
+    /* Reset CLCD screen --------------------------------------------------- */
+    LCD_RST(0);             /* assert Reset. */
+    delay(1);
+    LCD_RST(1);             /* deassert Reset. */
+    delay(10);
+
+    /* Driving ability settings ----------------------------------------------*/
+    wr_reg(0xEA, 0x00);     /* Power control internal used (1).    */
+    wr_reg(0xEB, 0x20);     /* Power control internal used (2).    */
+    wr_reg(0xEC, 0x0C);     /* Source control internal used (1).   */
+    wr_reg(0xED, 0xC7);     /* Source control internal used (2).   */
+    wr_reg(0xE8, 0x38);     /* Source output period Normal mode.   */
+    wr_reg(0xE9, 0x10);     /* Source output period Idle mode.     */
+    wr_reg(0xF1, 0x01);     /* RGB 18-bit interface ;0x0110.       */
+    wr_reg(0xF2, 0x10);
+
+    /* Adjust the Gamma Curve ------------------------------------------------*/
+    wr_reg(0x40, 0x01);
+    wr_reg(0x41, 0x00);
+    wr_reg(0x42, 0x00);
+    wr_reg(0x43, 0x10);
+    wr_reg(0x44, 0x0E);
+    wr_reg(0x45, 0x24);
+    wr_reg(0x46, 0x04);
+    wr_reg(0x47, 0x50);
+    wr_reg(0x48, 0x02);
+    wr_reg(0x49, 0x13);
+    wr_reg(0x4A, 0x19);
+    wr_reg(0x4B, 0x19);
+    wr_reg(0x4C, 0x16);
+
+    wr_reg(0x50, 0x1B);
+    wr_reg(0x51, 0x31);
+    wr_reg(0x52, 0x2F);
+    wr_reg(0x53, 0x3F);
+    wr_reg(0x54, 0x3F);
+    wr_reg(0x55, 0x3E);
+    wr_reg(0x56, 0x2F);
+    wr_reg(0x57, 0x7B);
+    wr_reg(0x58, 0x09);
+    wr_reg(0x59, 0x06);
+    wr_reg(0x5A, 0x06);
+    wr_reg(0x5B, 0x0C);
+    wr_reg(0x5C, 0x1D);
+    wr_reg(0x5D, 0xCC);
+
+    /* Power voltage setting -------------------------------------------------*/
+    wr_reg(0x1B, 0x1B);
+    wr_reg(0x1A, 0x01);
+    wr_reg(0x24, 0x2F);
+    wr_reg(0x25, 0x57);
+    wr_reg(0x23, 0x88);
+
+    /* Power on setting ------------------------------------------------------*/
+    wr_reg(0x18, 0x36);    /* Internal oscillator frequency adj.  */
+    wr_reg(0x19, 0x01);    /* Enable internal oscillator.         */
+    wr_reg(0x01, 0x00);    /* Normal mode, no scroll.             */
+    wr_reg(0x1F, 0x88);    /* Power control 6 - DDVDH Off.        */
+    delay(20);
+    wr_reg(0x1F, 0x82);    /* Power control 6 - Step-up: 3 x VCI. */
+    delay(5);
+    wr_reg(0x1F, 0x92);    /* Power control 6 - Step-up: On.      */
+    delay(5);
+    wr_reg(0x1F, 0xD2);    /* Power control 6 - VCOML active.     */
+    delay(5);
+
+    /* Color selection -------------------------------------------------------*/
+    wr_reg(0x17, 0x55);    /* RGB, System interface: 16 Bit/Pixel. */
+    wr_reg(0x00, 0x00);    /* Scrolling off, no standby.           */
+
+    /* Interface config ------------------------------------------------------*/
+    wr_reg(0x2F, 0x11);    /* LCD Drive: 1-line inversion.        */
+    wr_reg(0x31, 0x00);
+    wr_reg(0x32, 0x00);    /* DPL=0, HSPL=0, VSPL=0, EPL=0.       */
+
+    /* Display on setting ----------------------------------------------------*/
+    wr_reg(0x28, 0x38);    /* PT(0,0) active, VGL/VGL.            */
+    delay(20);
+    wr_reg(0x28, 0x3C);    /* Display active, VGL/VGL.            */
+
+#if (LANDSCAPE == 1)
+#if (ROTATE180 == 0)
+    wr_reg (0x16, 0xA8);
+#else /* (ROTATE180 == 0) */
+    wr_reg (0x16, 0x68);
+#endif /* (ROTATE180 == 0) */
+#else /* (LANDSCAPE == 1) */
+#if (ROTATE180 == 0)
+    wr_reg (0x16, 0x08);
+#else /* (ROTATE180 == 0) */
+     wr_reg (0x16, 0xC8);
+#endif /* (ROTATE180 == 0) */
+#endif /* (LANDSCAPE == 1) */
+
+    /* Display scrolling settings --------------------------------------------*/
+    wr_reg(0x0E, 0x00);         /* TFA MSB */
+    wr_reg(0x0F, 0x00);         /* TFA LSB */
+    wr_reg(0x10, 320 >> 8);     /* VSA MSB */
+    wr_reg(0x11, 320 &  0xFF);  /* VSA LSB */
+    wr_reg(0x12, 0x00);         /* BFA MSB */
+    wr_reg(0x13, 0x00);         /* BFA LSB */
+
+    LCD_BL(1);                  /* turn on backlight                  */
+}
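A usage sketch for the CLCD driver above: bring the panel up, clear it and draw a caption plus an image buffer. The 96x96 RGB888 frame, its position and the Black/White colour constants (defined by the driver headers) are illustrative.

    #include "glcd_mps3.h"

    void lcd_show_frame(uint8_t *rgb888_frame)
    {
        char caption[] = "ML eval kit";

        GLCD_Initialize();
        GLCD_Clear(Black);
        GLCD_SetTextColor(White);
        GLCD_DisplayString(0, 0, 0, caption);             /* line 0, col 0, font 0 */
        GLCD_Image(rgb888_frame, 96, 96, 3, 100, 60, 1);  /* 3 channels, no downsampling */
    }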
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/device_mps3.h b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/device_mps3.h
new file mode 100644
index 0000000..f0bab79
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/device_mps3.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef DEVICE_MPS3_H
+#define DEVICE_MPS3_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "cmsis.h"      /* CMSIS device header. */
+#include "smm_mps3.h"   /* Memory map for MPS3. */
+
+#include <stdio.h>
+
+typedef struct _CMSDK_UART_TypeDef_
+{
+    __IO uint32_t  DATA;        /* Offset: 0x000 (R/W) Data Register.    */
+    __IO uint32_t  STATE;       /* Offset: 0x004 (R/W) Status Register.  */
+    __IO uint32_t  CTRL;        /* Offset: 0x008 (R/W) Control Register. */
+
+    union {
+    __I  uint32_t  INTSTATUS;   /* Offset: 0x00C (R/ ) Interrupt Status Register. */
+    __O  uint32_t  INTCLEAR;    /* Offset: 0x00C ( /W) Interrupt Clear Register. */
+    };
+    __IO uint32_t  BAUDDIV;     /* Offset: 0x010 (R/W) Baudrate Divider Register. */
+
+} CMSDK_UART_TypeDef;
+
+#define CMSDK_UART0             ((CMSDK_UART_TypeDef *)CMSDK_UART0_BASE)
+
+/* CMSDK_UART DATA Register Definitions. */
+#define CMSDK_UART_DATA_Pos               0                                             /* CMSDK_UART_DATA_Pos: DATA Position. */
+#define CMSDK_UART_DATA_Msk              (0xFFul << CMSDK_UART_DATA_Pos)                /* CMSDK_UART DATA: DATA Mask. */
+
+/* CMSDK_UART STATE Register Definitions. */
+#define CMSDK_UART_STATE_RXOR_Pos         3                                             /* CMSDK_UART STATE: RXOR Position. */
+#define CMSDK_UART_STATE_RXOR_Msk         (0x1ul << CMSDK_UART_STATE_RXOR_Pos)          /* CMSDK_UART STATE: RXOR Mask. */
+
+#define CMSDK_UART_STATE_TXOR_Pos         2                                             /* CMSDK_UART STATE: TXOR Position. */
+#define CMSDK_UART_STATE_TXOR_Msk         (0x1ul << CMSDK_UART_STATE_TXOR_Pos)          /* CMSDK_UART STATE: TXOR Mask. */
+
+#define CMSDK_UART_STATE_RXBF_Pos         1                                             /* CMSDK_UART STATE: RXBF Position. */
+#define CMSDK_UART_STATE_RXBF_Msk         (0x1ul << CMSDK_UART_STATE_RXBF_Pos)          /* CMSDK_UART STATE: RXBF Mask. */
+
+#define CMSDK_UART_STATE_TXBF_Pos         0                                             /* CMSDK_UART STATE: TXBF Position. */
+#define CMSDK_UART_STATE_TXBF_Msk         (0x1ul << CMSDK_UART_STATE_TXBF_Pos)          /* CMSDK_UART STATE: TXBF Mask. */
+
+/* CMSDK_UART CTRL Register Definitions. */
+#define CMSDK_UART_CTRL_HSTM_Pos          6                                             /* CMSDK_UART CTRL: HSTM Position. */
+#define CMSDK_UART_CTRL_HSTM_Msk          (0x01ul << CMSDK_UART_CTRL_HSTM_Pos)          /* CMSDK_UART CTRL: HSTM Mask. */
+
+#define CMSDK_UART_CTRL_RXORIRQEN_Pos     5                                             /* CMSDK_UART CTRL: RXORIRQEN Position. */
+#define CMSDK_UART_CTRL_RXORIRQEN_Msk     (0x01ul << CMSDK_UART_CTRL_RXORIRQEN_Pos)     /* CMSDK_UART CTRL: RXORIRQEN Mask. */
+
+#define CMSDK_UART_CTRL_TXORIRQEN_Pos     4                                             /* CMSDK_UART CTRL: TXORIRQEN Position. */
+#define CMSDK_UART_CTRL_TXORIRQEN_Msk     (0x01ul << CMSDK_UART_CTRL_TXORIRQEN_Pos)     /* CMSDK_UART CTRL: TXORIRQEN Mask. */
+
+#define CMSDK_UART_CTRL_RXIRQEN_Pos       3                                             /* CMSDK_UART CTRL: RXIRQEN Position. */
+#define CMSDK_UART_CTRL_RXIRQEN_Msk       (0x01ul << CMSDK_UART_CTRL_RXIRQEN_Pos)       /* CMSDK_UART CTRL: RXIRQEN Mask. */
+
+#define CMSDK_UART_CTRL_TXIRQEN_Pos       2                                             /* CMSDK_UART CTRL: TXIRQEN Position. */
+#define CMSDK_UART_CTRL_TXIRQEN_Msk       (0x01ul << CMSDK_UART_CTRL_TXIRQEN_Pos)       /* CMSDK_UART CTRL: TXIRQEN Mask. */
+
+#define CMSDK_UART_CTRL_RXEN_Pos          1                                             /* CMSDK_UART CTRL: RXEN Position. */
+#define CMSDK_UART_CTRL_RXEN_Msk          (0x01ul << CMSDK_UART_CTRL_RXEN_Pos)          /* CMSDK_UART CTRL: RXEN Mask. */
+
+#define CMSDK_UART_CTRL_TXEN_Pos          0                                             /* CMSDK_UART CTRL: TXEN Position. */
+#define CMSDK_UART_CTRL_TXEN_Msk          (0x01ul << CMSDK_UART_CTRL_TXEN_Pos)          /* CMSDK_UART CTRL: TXEN Mask. */
+
+/* CMSDK_UART INTSTATUS/INTCLEAR Register Definitions. */
+#define CMSDK_UART_INT_RXORIRQ_Pos        3                                             /* CMSDK_UART INT: RXORIRQ Position. */
+#define CMSDK_UART_INT_RXORIRQ_Msk        (0x01ul << CMSDK_UART_INT_RXORIRQ_Pos)        /* CMSDK_UART INT: RXORIRQ Mask. */
+
+#define CMSDK_UART_INT_TXORIRQ_Pos        2                                             /* CMSDK_UART INT: TXORIRQ Position. */
+#define CMSDK_UART_INT_TXORIRQ_Msk        (0x01ul << CMSDK_UART_INT_TXORIRQ_Pos)        /* CMSDK_UART INT: TXORIRQ Mask. */
+
+#define CMSDK_UART_INT_RXIRQ_Pos          1                                             /* CMSDK_UART INT: RXIRQ Position. */
+#define CMSDK_UART_INT_RXIRQ_Msk          (0x01ul << CMSDK_UART_INT_RXIRQ_Pos)          /* CMSDK_UART INT: RXIRQ Mask. */
+
+#define CMSDK_UART_INT_TXIRQ_Pos          0                                             /* CMSDK_UART INT: TXIRQ Position. */
+#define CMSDK_UART_INT_TXIRQ_Msk          (0x01ul << CMSDK_UART_INT_TXIRQ_Pos)          /* CMSDK_UART INT: TXIRQ Mask. */
+
+/* CMSDK_UART BAUDDIV Register Definitions. */
+#define CMSDK_UART_BAUDDIV_Pos            0                                             /* CMSDK_UART BAUDDIV: BAUDDIV Position. */
+#define CMSDK_UART_BAUDDIV_Msk           (0xFFFFFul << CMSDK_UART_BAUDDIV_Pos)          /* CMSDK_UART BAUDDIV: BAUDDIV Mask. */
+
+/**
+ * @brief   Gets the core clock set for MPS3.
+ * @return  Clock value in Hz.
+ **/
+uint32_t GetMPS3CoreClock(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* DEVICE_MPS3_H */
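
The CMSDK UART declared above is a simple polled peripheral: poll STATE, then read or write DATA. A minimal transmit sketch under that assumption; the helper names below are illustrative and not part of the kit:

    #include "device_mps3.h"

    /* Program the baud divider from the core clock and enable TX/RX. */
    static void uart_init_sketch(uint32_t baud_rate)
    {
        CMSDK_UART0->BAUDDIV = GetMPS3CoreClock() / baud_rate;
        CMSDK_UART0->CTRL = CMSDK_UART_CTRL_TXEN_Msk | CMSDK_UART_CTRL_RXEN_Msk;
    }

    /* Block while the TX buffer is full, then write one character. */
    static void uart_putc_sketch(char c)
    {
        while (CMSDK_UART0->STATE & CMSDK_UART_STATE_TXBF_Msk) {}
        CMSDK_UART0->DATA = (uint32_t)c & CMSDK_UART_DATA_Msk;
    }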
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/font_9x15_h.h b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/font_9x15_h.h
new file mode 100644
index 0000000..b8b6bdc
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/font_9x15_h.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Font Generated by MikroElektronika GLCD Font Creator 1.2.0.0
+//MikroElektronika 2011
+//http://www.mikroe.com
+
+//GLCD FontName : Lucida_Console9x15
+//GLCD FontSize : 9x15
+
+#ifndef FONT_9x15_H_H
+#define FONT_9x15_H_H
+
+const unsigned short Font_9x15_h[] = {
+    0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,      /* Code for char num 32. */
+    0x00,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x00,0x10,0x10,0x00,0x00,0x00,      /* Code for char num 33. */
+    0x44,0x44,0x44,0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,      /* Code for char num 34. */
+    0x00,0x12,0x12,0x24,0x7F,0x24,0x28,0x48,0xFE,0x48,0x90,0x90,0x00,0x00,0x00,      /* Code for char num 35. */
+    0x10,0x7C,0x16,0x12,0x12,0x1C,0x38,0x70,0x50,0x50,0x52,0x3E,0x10,0x00,0x00,      /* Code for char num 36. */
+    0x00,0x8C,0x92,0x52,0x52,0x2C,0x10,0x08,0x68,0x94,0x92,0x92,0x62,0x00,0x00,      /* Code for char num 37. */
+    0x00,0x18,0x24,0x24,0x34,0x18,0x0C,0x12,0xB2,0xE2,0xC2,0xBC,0x00,0x00,0x00,      /* Code for char num 38. */
+    0x08,0x08,0x08,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,      /* Code for char num 39. */
+    0xC0,0x60,0x10,0x10,0x08,0x08,0x08,0x08,0x08,0x08,0x10,0x10,0x60,0xC0,0x00,      /* Code for char num 40. */
+    0x0C,0x18,0x20,0x20,0x40,0x40,0x40,0x40,0x40,0x40,0x20,0x20,0x18,0x0C,0x00,      /* Code for char num 41. */
+    0x00,0x10,0x92,0xEE,0x18,0x28,0x28,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,      /* Code for char num 42. */
+    0x00,0x00,0x00,0x00,0x10,0x10,0x10,0x10,0xFE,0x10,0x10,0x10,0x00,0x00,0x00,      /* Code for char num 43. */
+    0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x10,0x08,0x00,      /* Code for char num 44. */
+    0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7C,0x00,0x00,0x00,0x00,0x00,0x00,0x00,      /* Code for char num 45. */
+    0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,      /* Code for char num 46. */
+    0x80,0x40,0x40,0x60,0x20,0x20,0x10,0x10,0x08,0x08,0x0C,0x04,0x04,0x02,0x00,      /* Code for char num 47. */
+    0x00,0x38,0x44,0x82,0x82,0x82,0x82,0x82,0x82,0x82,0x44,0x38,0x00,0x00,0x00,      /* Code for char num 48. */
+    0x00,0x10,0x1E,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0xFE,0x00,0x00,0x00,      /* Code for char num 49. */
+    0x00,0x3E,0x42,0x40,0x40,0x40,0x20,0x10,0x08,0x04,0x02,0x7E,0x00,0x00,0x00,      /* Code for char num 50. */
+    0x00,0x3C,0x40,0x40,0x40,0x60,0x38,0x40,0x40,0x40,0x40,0x3C,0x00,0x00,0x00,      /* Code for char num 51. */
+    0x00,0x20,0x30,0x28,0x24,0x24,0x22,0x21,0x7F,0x20,0x20,0x20,0x00,0x00,0x00,      /* Code for char num 52. */
+    0x00,0x7C,0x04,0x04,0x04,0x1C,0x20,0x40,0x40,0x40,0x20,0x3C,0x00,0x00,0x00,      /* Code for char num 53. */
+    0x00,0x78,0x04,0x04,0x02,0x3A,0x46,0x82,0x82,0x82,0x44,0x38,0x00,0x00,0x00,      /* Code for char num 54. */
+    0x00,0xFE,0x80,0x40,0x20,0x20,0x10,0x10,0x08,0x08,0x04,0x04,0x00,0x00,0x00,      /* Code for char num 55. */
+    0x00,0x3C,0x42,0x42,0x42,0x24,0x1C,0x62,0x42,0x42,0x42,0x3C,0x00,0x00,0x00,      /* Code for char num 56. */
+    0x00,0x38,0x44,0x82,0x82,0x82,0xC4,0xB8,0x80,0x40,0x40,0x3C,0x00,0x00,0x00,      /* Code for char num 57. */
+    0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,      /* Code for char num 58. */
+    0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x18,0x18,0x10,0x08,0x00,      /* Code for char num 59. */
+    0x00,0x00,0x00,0x00,0x80,0x60,0x10,0x0C,0x0C,0x10,0x60,0x80,0x00,0x00,0x00,      /* Code for char num 60. */
+    0x00,0x00,0x00,0x00,0x00,0x00,0xFE,0x00,0x00,0xFE,0x00,0x00,0x00,0x00,0x00,      /* Code for char num 61. */
+    0x00,0x00,0x00,0x00,0x02,0x0C,0x10,0x60,0x60,0x10,0x0C,0x02,0x00,0x00,0x00,      /* Code for char num 62. */
+    0x00,0x3E,0x42,0x42,0x40,0x20,0x10,0x08,0x08,0x00,0x08,0x08,0x00,0x00,0x00,      /* Code for char num 63. */
+    0x00,0x78,0x84,0xE2,0x92,0x8A,0x8A,0xCA,0xCA,0xB2,0xA6,0x3C,0x00,0x00,0x00,      /* Code for char num 64. */
+    0x00,0x00,0x10,0x38,0x28,0x28,0x44,0x44,0xFE,0x82,0x82,0x82,0x00,0x00,0x00,      /* Code for char num 65. */
+    0x00,0x00,0x3E,0x42,0x42,0x22,0x1E,0x22,0x42,0x42,0x42,0x3E,0x00,0x00,0x00,      /* Code for char num 66. */
+    0x00,0x00,0xF8,0x06,0x02,0x01,0x01,0x01,0x01,0x02,0x06,0xF8,0x00,0x00,0x00,      /* Code for char num 67. */
+    0x00,0x00,0x3E,0x42,0x82,0x82,0x82,0x82,0x82,0x82,0x42,0x3E,0x00,0x00,0x00,      /* Code for char num 68. */
+    0x00,0x00,0xFE,0x02,0x02,0x02,0x02,0x7E,0x02,0x02,0x02,0xFE,0x00,0x00,0x00,      /* Code for char num 69. */
+    0x00,0x00,0xFE,0x02,0x02,0x02,0x02,0x7E,0x02,0x02,0x02,0x02,0x00,0x00,0x00,      /* Code for char num 70. */
+    0x00,0x00,0xF8,0x06,0x02,0x01,0x01,0xE1,0x81,0x82,0x86,0xF8,0x00,0x00,0x00,      /* Code for char num 71. */
+    0x00,0x00,0x42,0x42,0x42,0x42,0x42,0x7E,0x42,0x42,0x42,0x42,0x00,0x00,0x00,      /* Code for char num 72. */
+    0x00,0x00,0xFE,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0xFE,0x00,0x00,0x00,      /* Code for char num 73. */
+    0x00,0x00,0x3C,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x1E,0x00,0x00,0x00,      /* Code for char num 74. */
+    0x00,0x00,0x42,0x22,0x12,0x0A,0x06,0x0A,0x12,0x22,0x42,0x82,0x00,0x00,0x00,      /* Code for char num 75. */
+    0x00,0x00,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0xFE,0x00,0x00,0x00,      /* Code for char num 76. */
+    0x00,0x00,0x63,0x63,0x63,0x55,0x55,0x55,0x4D,0x49,0x41,0x41,0x00,0x00,0x00,      /* Code for char num 77. */
+    0x00,0x00,0x82,0x86,0x8A,0x8A,0x92,0x92,0xA2,0xA2,0xC2,0x82,0x00,0x00,0x00,      /* Code for char num 78. */
+    0x00,0x00,0x3C,0x42,0x81,0x81,0x81,0x81,0x81,0x81,0x42,0x3C,0x00,0x00,0x00,      /* Code for char num 79. */
+    0x00,0x00,0x3E,0x42,0x42,0x42,0x62,0x1E,0x02,0x02,0x02,0x02,0x00,0x00,0x00,      /* Code for char num 80. */
+    0x00,0x00,0x3C,0x42,0x81,0x81,0x81,0x81,0x81,0x81,0x42,0x3C,0x60,0x80,0x00,      /* Code for char num 81. */
+    0x00,0x00,0x3E,0x42,0x42,0x42,0x22,0x1E,0x12,0x22,0x42,0x82,0x00,0x00,0x00,      /* Code for char num 82. */
+    0x00,0x00,0x7C,0x42,0x02,0x06,0x1C,0x20,0x40,0x40,0x42,0x3E,0x00,0x00,0x00,      /* Code for char num 83. */
+    0x00,0x00,0xFE,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x00,0x00,0x00,      /* Code for char num 84. */
+    0x00,0x00,0x82,0x82,0x82,0x82,0x82,0x82,0x82,0x82,0x44,0x3C,0x00,0x00,0x00,      /* Code for char num 85. */
+    0x00,0x00,0x82,0x82,0x82,0x82,0x44,0x44,0x28,0x28,0x38,0x10,0x00,0x00,0x00,      /* Code for char num 86. */
+    0x00,0x00,0x82,0x82,0x92,0x92,0xAA,0xAA,0xAA,0xAA,0x64,0x44,0x00,0x00,0x00,      /* Code for char num 87. */
+    0x00,0x00,0x82,0x82,0x44,0x28,0x10,0x10,0x28,0x44,0x82,0x82,0x00,0x00,0x00,      /* Code for char num 88. */
+    0x00,0x00,0x82,0x82,0x44,0x44,0x28,0x10,0x10,0x10,0x10,0x10,0x00,0x00,0x00,      /* Code for char num 89. */
+    0x00,0x00,0xFF,0x80,0x40,0x20,0x10,0x08,0x04,0x02,0x01,0xFF,0x00,0x00,0x00,      /* Code for char num 90. */
+    0xF8,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0xF8,0x00,      /* Code for char num 91. */
+    0x02,0x04,0x04,0x04,0x08,0x08,0x10,0x10,0x20,0x20,0x20,0x40,0x40,0x80,0x00,      /* Code for char num 92. */
+    0x3E,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3E,0x00,      /* Code for char num 93. */
+    0x00,0x10,0x10,0x10,0x28,0x28,0x44,0x44,0x44,0x82,0x00,0x00,0x00,0x00,0x00,      /* Code for char num 94. */
+    0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFE,0x00,0x00,      /* Code for char num 95. */
+    0x10,0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,      /* Code for char num 96. */
+    0x00,0x00,0x00,0x00,0x3C,0x40,0x40,0x78,0x44,0x42,0x62,0xDC,0x00,0x00,0x00,      /* Code for char num 97. */
+    0x02,0x02,0x02,0x02,0x7A,0x46,0x82,0x82,0x82,0x82,0x46,0x3A,0x00,0x00,0x00,      /* Code for char num 98. */
+    0x00,0x00,0x00,0x00,0xF8,0x04,0x02,0x02,0x02,0x02,0x04,0xF8,0x00,0x00,0x00,      /* Code for char num 99. */
+    0x80,0x80,0x80,0x80,0xB8,0xC4,0x82,0x82,0x82,0x82,0xC4,0xBC,0x00,0x00,0x00,      /* Code for char num 100. */
+    0x00,0x00,0x00,0x00,0x38,0x44,0x42,0x7E,0x02,0x02,0x04,0x78,0x00,0x00,0x00,      /* Code for char num 101. */
+    0xF0,0x08,0x08,0x08,0xFE,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x00,0x00,0x00,      /* Code for char num 102. */
+    0x00,0x00,0x00,0x00,0xB8,0xC4,0x82,0x82,0x82,0x82,0xC4,0xBC,0x80,0x40,0x3C,      /* Code for char num 103. */
+    0x02,0x02,0x02,0x02,0x3A,0x46,0x42,0x42,0x42,0x42,0x42,0x42,0x00,0x00,0x00,      /* Code for char num 104. */
+    0x18,0x18,0x00,0x00,0x1E,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x00,0x00,0x00,      /* Code for char num 105. */
+    0x30,0x30,0x00,0x00,0x3C,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x1E,      /* Code for char num 106. */
+    0x02,0x02,0x02,0x02,0x42,0x22,0x12,0x0E,0x0A,0x12,0x22,0x42,0x00,0x00,0x00,      /* Code for char num 107. */
+    0x1E,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x00,0x00,0x00,      /* Code for char num 108. */
+    0x00,0x00,0x00,0x00,0xDA,0xB6,0x92,0x92,0x92,0x92,0x92,0x92,0x00,0x00,0x00,      /* Code for char num 109. */
+    0x00,0x00,0x00,0x00,0x3A,0x46,0x42,0x42,0x42,0x42,0x42,0x42,0x00,0x00,0x00,      /* Code for char num 110. */
+    0x00,0x00,0x00,0x00,0x38,0x44,0x82,0x82,0x82,0x82,0x44,0x38,0x00,0x00,0x00,      /* Code for char num 111. */
+    0x00,0x00,0x00,0x00,0x7A,0x46,0x82,0x82,0x82,0x82,0x46,0x3A,0x02,0x02,0x02,      /* Code for char num 112. */
+    0x00,0x00,0x00,0x00,0xB8,0xC4,0x82,0x82,0x82,0x82,0xC4,0xBC,0x80,0x80,0x80,      /* Code for char num 113. */
+    0x00,0x00,0x00,0x00,0xF4,0x8C,0x04,0x04,0x04,0x04,0x04,0x04,0x00,0x00,0x00,      /* Code for char num 114. */
+    0x00,0x00,0x00,0x00,0x7C,0x02,0x02,0x0C,0x30,0x40,0x42,0x3E,0x00,0x00,0x00,      /* Code for char num 115. */
+    0x00,0x00,0x08,0x08,0xFE,0x08,0x08,0x08,0x08,0x08,0x08,0xF0,0x00,0x00,0x00,      /* Code for char num 116. */
+    0x00,0x00,0x00,0x00,0x42,0x42,0x42,0x42,0x42,0x42,0x62,0x5C,0x00,0x00,0x00,      /* Code for char num 117. */
+    0x00,0x00,0x00,0x00,0x82,0x82,0x82,0x44,0x44,0x28,0x28,0x10,0x00,0x00,0x00,      /* Code for char num 118. */
+    0x00,0x00,0x00,0x00,0x82,0x92,0xAA,0xAA,0xAA,0xAA,0x44,0x44,0x00,0x00,0x00,      /* Code for char num 119. */
+    0x00,0x00,0x00,0x00,0x82,0x44,0x28,0x10,0x10,0x28,0x44,0x82,0x00,0x00,0x00,      /* Code for char num 120. */
+    0x00,0x00,0x00,0x00,0x82,0x82,0x82,0x44,0x44,0x28,0x28,0x10,0x10,0x0C,0x00,      /* Code for char num 121. */
+    0x00,0x00,0x00,0x00,0xFE,0x80,0x40,0x20,0x10,0x08,0x04,0xFE,0x00,0x00,0x00,      /* Code for char num 122. */
+    0xE0,0x10,0x10,0x10,0x10,0x10,0x10,0x0C,0x10,0x10,0x10,0x10,0x10,0xE0,0x00,      /* Code for char num 123. */
+    0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x00,      /* Code for char num 124. */
+    0x0E,0x10,0x10,0x10,0x10,0x10,0x10,0x60,0x10,0x10,0x10,0x10,0x10,0x0E,0x00,      /* Code for char num 125. */
+    0x00,0x00,0x00,0x00,0x00,0x00,0x62,0x92,0x8C,0x00,0x00,0x00,0x00,0x00,0x00,      /* Code for char num 126. */
+    0x00,0x00,0x00,0x07,0x05,0x05,0x05,0x05,0x05,0x05,0x07,0x00,0x00,0x00,0x00       /* Code for char num 127. */
+};
+
+
+#endif /* FONT_9x15_H_H */
\ No newline at end of file
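
The table stores one glyph per printable ASCII code, 15 row entries per glyph, with the pixel bits in the low 9 bits of each entry. A small indexing sketch based on that layout; the helper name is hypothetical:

    #include "font_9x15_h.h"

    /* Return the first of the 15 row entries for a printable ASCII character.
     * Glyphs start at ASCII 32; codes outside 32..127 fall back to '?'. */
    static const unsigned short *font_glyph_sketch(unsigned char c)
    {
        if (c < 32 || c > 127) {
            c = '?';
        }
        return &Font_9x15_h[(c - 32) * 15];
    }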
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/glcd_mps3.h b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/glcd_mps3.h
new file mode 100644
index 0000000..c2810c0
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/glcd_mps3.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef GLCD_MPS3_H
+#define GLCD_MPS3_H
+
+#include <stdint.h>
+
+/******************************************************************************
+  Color coding
+  GLCD is coded:   15..11 red, 10..5 green, 4..0 blue  (unsigned short)
+                   GLCD_R5, GLCD_G6, GLCD_B5
+  original coding: 17..12 red, 11..6 green, 5..0 blue
+                   ORG_R6,  ORG_G6,  ORG_B6
+
+  ORG_R1..5 = GLCD_R0..4,  ORG_R0 = GLCD_R4
+  ORG_G0..5 = GLCD_G0..5,
+  ORG_B1..5 = GLCD_B0..4,  ORG_B0 = GLCD_B4
+
+  GLCD RGB color definitions
+******************************************************************************/
+#define Black           0x0000      /*   0,   0,   0 */
+#define Navy            0x000F      /*   0,   0, 128 */
+#define DarkGreen       0x03E0      /*   0, 128,   0 */
+#define DarkCyan        0x03EF      /*   0, 128, 128 */
+#define Maroon          0x7800      /* 128,   0,   0 */
+#define Purple          0x780F      /* 128,   0, 128 */
+#define Olive           0x7BE0      /* 128, 128,   0 */
+#define LightGrey       0xC618      /* 192, 192, 192 */
+#define DarkGrey        0x7BEF      /* 128, 128, 128 */
+#define Blue            0x001F      /*   0,   0, 255 */
+#define Green           0x07E0      /*   0, 255,   0 */
+#define Cyan            0x07FF      /*   0, 255, 255 */
+#define Red             0xF800      /* 255,   0,   0 */
+#define Magenta         0xF81F      /* 255,   0, 255 */
+#define Yellow          0xFFE0      /* 255, 255, 0   */
+#define White           0xFFFF      /* 255, 255, 255 */
+
+/************************** Orientation  configuration ************************/
+#ifndef LANDSCAPE
+#define LANDSCAPE   1               /* 1 for landscape, 0 for portrait.    */
+#endif
+#ifndef ROTATE180
+#define ROTATE180   1               /* 1 to rotate the screen by 180 degrees. */
+#endif
+
+/*------------------------- Speed dependent settings -------------------------*/
+
+/* If the processor runs at a high frequency, the delay has to be increased;
+   it can be scaled up by a factor of 2^N using this constant. */
+#define DELAY_2N    8
+
+/*---------------------- Graphic LCD size definitions ------------------------*/
+#if (LANDSCAPE == 1)
+   #define GLCD_WIDTH       320                 /* Screen Width (in pixels). */
+   #define GLCD_HEIGHT      240                 /* Screen Height (in pixels). */
+#else
+   #define GLCD_WIDTH       240                 /* Screen Width (in pixels). */
+   #define GLCD_HEIGHT      320                 /* Screen Height (in pixels). */
+#endif
+
+#define BPP                 16                  /* Bits per pixel.           */
+#define BYPP                ((BPP+7)/8)         /* Bytes per pixel.          */
+
+
+/**
+ * @brief      Initialize the Himax LCD with HX8347-D LCD Controller.
+ */
+void GLCD_Initialize(void);
+
+/**
+ * @brief      Set draw window region to whole screen.
+ */
+void GLCD_WindowMax(void);
+
+/**
+ * @brief      Set draw window region.
+ * @param[in]  x  Horizontal position.
+ * @param[in]  y  Vertical position.
+ * @param[in]  w  Window width in pixels.
+ * @param[in]  h  Window height in pixels.
+ */
+void GLCD_SetWindow(unsigned int x, unsigned int y,
+                     unsigned int w, unsigned int h);
+
+/**
+ * @brief      Set foreground color.
+ * @param[in]  color    Foreground color.
+ */
+void GLCD_SetTextColor(unsigned short color);
+
+/**
+ * @brief      Set background color.
+ * @param[in]  color    Background color.
+ */
+void GLCD_SetBackColor(unsigned short color);
+
+/**
+ * @brief      Clear display.
+ * @param[in]  color Display clearing color.
+ *
+ */
+void GLCD_Clear(unsigned short color);
+
+/**
+ * @brief      Draw character on given position.
+ * @param[in]  x     Horizontal position.
+ * @param[in]  y     Vertical position.
+ * @param[in]  cw    Character width in pixels.
+ * @param[in]  ch    Character height in pixels.
+ * @param[in]  c     Pointer to character bitmap.
+ *
+ */
+void GLCD_DrawChar(unsigned int x,  unsigned int y,
+                  unsigned int cw, unsigned int ch,
+                  unsigned char *c);
+
+/**
+ * @brief      Display character on given line.
+ * @param[in]  ln    Line number.
+ * @param[in]  col   Column number.
+ * @param[in]  fi    Font index (0 = 9x15).
+ * @param[in]  c     ASCII character.
+ */
+void GLCD_DisplayChar(unsigned int ln, unsigned int col,
+                     unsigned char fi, unsigned char  c);
+
+
+/**
+ * @brief      Display string on given line.
+ * @param[in]  ln    Line number.
+ * @param[in]  col   Column number.
+ * @param[in]  fi    Font index (0 = 9x15).
+ * @param[in]  s     Pointer to string.
+ */
+void GLCD_DisplayString(unsigned int ln, unsigned int col,
+                        unsigned char fi, char *s);
+
+/**
+ * @brief      Clear given line.
+ * @param[in]  ln    Line number.
+ * @param[in]  fi    Font index (0 = 9x15).
+ */
+void GLCD_ClearLn(unsigned int ln, unsigned char fi);
+
+/**
+ * @brief      Display graphical bitmap image at position x horizontally and y
+ *             vertically. This function is optimized for the 16 bits-per-pixel
+ *             format; it has to be adapted for any other format.
+ * @param[in]  x        Horizontal position.
+ * @param[in]  y        Vertical position.
+ * @param[in]  w        Width of bitmap.
+ * @param[in]  h        Height of bitmap.
+ * @param[in]  bitmap   Address at which the bitmap data resides.
+ */
+void GLCD_Bitmap(unsigned int x,  unsigned int y,
+               unsigned int w, unsigned int h,
+               unsigned short *bitmap);
+
+/**
+ * @brief Displays an 8-bit image; conversion to the LCD's
+ *        16-bit format is done on the fly.
+ * @param[in]  data                 Pointer to the full sized image data.
+ * @param[in]  width                Image width.
+ * @param[in]  height               Image height.
+ * @param[in]  channels             Number of channels in the image.
+ * @param[in]  pos_x                Start x position for the LCD.
+ * @param[in]  pos_y                Start y position for the LCD.
+ * @param[in]  downsample_factor    Factor by which the image
+ *                                  is downsampled.
+ */
+void GLCD_Image(void *data, const uint32_t width,
+               const uint32_t height, const uint32_t channels,
+               const uint32_t pos_x, const uint32_t pos_y,
+               const uint32_t downsample_factor);
+
+/**
+ * @brief      Draw box filled with color.
+ * @param[in]  x        Horizontal position.
+ * @param[in]  y        Vertical position.
+ * @param[in]  w        Window width in pixels.
+ * @param[in]  h        Window height in pixels.
+ * @param[in]  color    Box color.
+ */
+void GLCD_Box(unsigned int x, unsigned int y,
+            unsigned int w, unsigned int h,
+            unsigned short color);
+
+#endif /* GLCD_MPS3_H */
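
A short usage sketch for this API, assuming the display has already been wired up by the platform initialisation code; the text, colours, image geometry and a downsample factor of 1 (taken to mean no downsampling) are illustrative choices only:

    #include "glcd_mps3.h"

    /* Bring up the panel, print a banner and show an 8-bit RGB image. */
    static void lcd_demo_sketch(uint8_t *rgb_image)   /* hypothetical 160x120x3 buffer */
    {
        GLCD_Initialize();                    /* HX8347-D controller init. */
        GLCD_Clear(Black);                    /* Wipe the whole screen.    */
        GLCD_SetTextColor(White);
        GLCD_SetBackColor(Black);
        GLCD_DisplayString(0, 0, 0, "MPS3 GLCD ready");  /* Font index 0 = 9x15. */

        /* 160x120 pixels, 3 channels, drawn at (80, 60). */
        GLCD_Image(rgb_image, 160, 120, 3, 80, 60, 1);
    }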
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/smm_mps3.h b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/smm_mps3.h
new file mode 100644
index 0000000..1c0e0f2
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/smm_mps3.h
@@ -0,0 +1,615 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef SMM_MPS3_H
+#define SMM_MPS3_H
+
+#include "cmsis.h"                  /* Device specific header file. */
+#include "peripheral_memmap.h"      /* Peripheral memory map definitions. */
+
+#if defined ( __CC_ARM   )
+#pragma anon_unions
+#endif
+
+/******************************************************************************/
+/*                          FPGA System Register declaration                  */
+/******************************************************************************/
+
+typedef struct
+{
+  __IO uint32_t LED;             /* Offset: 0x000 (R/W)  LED connections
+                                  *                         [31:2] : Reserved
+                                  *                          [1:0] : LEDs
+                                  */
+       uint32_t RESERVED1[1];
+  __IO uint32_t BUTTON;          /* Offset: 0x008 (R/W)  Buttons
+                                  *                         [31:2] : Reserved
+                                  *                          [1:0] : Buttons
+                                  */
+       uint32_t RESERVED2[1];
+  __IO uint32_t CLK1HZ;          /* Offset: 0x010 (R/W)  1Hz up counter    */
+  __IO uint32_t CLK100HZ;        /* Offset: 0x014 (R/W)  100Hz up counter  */
+  __IO uint32_t COUNTER;         /* Offset: 0x018 (R/W)  Cycle Up Counter
+                                  *                         Increments when the 32-bit prescale counter reaches zero
+                                  */
+  __IO uint32_t PRESCALE;        /* Offset: 0x01C (R/W)  Prescaler
+                                  *                         Bit[31:0] : reload value for prescale counter
+                                  */
+  __IO uint32_t PSCNTR;          /* Offset: 0x020 (R/W)  32-bit Prescale counter
+                                  *                         Current value of the pre-scaler counter.
+                                  *                         The Cycle Up Counter increments when the prescale down counter reaches 0.
+                                  *                         The pre-scaler counter is reloaded with PRESCALE after reaching 0.
+                                  */
+       uint32_t RESERVED3[1];
+  __IO uint32_t SWITCHES;        /* Offset: 0x028 (R/W)  Switches
+                                  *                         [31:8] : Reserved
+                                  *                          [7:0] : Switches
+                                  */
+       uint32_t RESERVED4[8];
+  __IO uint32_t MISC;            /* Offset: 0x04C (R/W)  Misc control
+                                  *                         [31:10] : Reserved
+                                  *                            [9] :
+                                  *                            [8] :
+                                  *                            [7] : ADC_SPI_nCS
+                                  *                            [6] : CLCD_BL_CTRL
+                                  *                            [5] : CLCD_RD
+                                  *                            [4] : CLCD_RS
+                                  *                            [3] : CLCD_RESET
+                                  *                            [2] : SHIELD_1_SPI_nCS
+                                  *                            [1] : SHIELD_0_SPI_nCS
+                                  *                            [0] : CLCD_CS
+                                  */
+} MPS3_FPGAIO_TypeDef;
+
+/* MISC register bit definitions. */
+
+#define CLCD_CS_Pos        0
+#define CLCD_CS_Msk        (1UL<<CLCD_CS_Pos)
+#define SHIELD_0_nCS_Pos   1
+#define SHIELD_0_nCS_Msk   (1UL<<SHIELD_0_nCS_Pos)
+#define SHIELD_1_nCS_Pos   2
+#define SHIELD_1_nCS_Msk   (1UL<<SHIELD_1_nCS_Pos)
+#define CLCD_RESET_Pos     3
+#define CLCD_RESET_Msk     (1UL<<CLCD_RESET_Pos)
+#define CLCD_RS_Pos        4
+#define CLCD_RS_Msk        (1UL<<CLCD_RS_Pos)
+#define CLCD_RD_Pos        5
+#define CLCD_RD_Msk        (1UL<<CLCD_RD_Pos)
+#define CLCD_BL_Pos        6
+#define CLCD_BL_Msk        (1UL<<CLCD_BL_Pos)
+#define ADC_nCS_Pos        7
+#define ADC_nCS_Msk        (1UL<<ADC_nCS_Pos)
+
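
The MISC bits above let software bit-bang the LCD control lines. A sketch of driving the LCD chip select through MPS3_FPGAIO (declared further down in this header); whether a set bit selects or deselects the panel depends on the board wiring, so treat the polarity as an assumption:

    /* Set or clear the CLCD_CS line via the FPGAIO MISC register. */
    static inline void clcd_cs_sketch(int set)
    {
        if (set) {
            MPS3_FPGAIO->MISC |= CLCD_CS_Msk;
        } else {
            MPS3_FPGAIO->MISC &= ~CLCD_CS_Msk;
        }
    }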
+/******************************************************************************/
+/*                        SCC Register declaration                            */
+/******************************************************************************/
+
+typedef struct
+{
+  __IO uint32_t CFG_REG0;        /* Offset: 0x000 (R/W)  Remaps block RAM to ZBT
+                                  *                         [31:1] : Reserved
+                                  *                            [0] 1 : REMAP BlockRam to ZBT
+                                  */
+  __IO uint32_t LEDS;            /* Offset: 0x004 (R/W)  Controls the MCC user LEDs
+                                  *                         [31:8] : Reserved
+                                  *                          [7:0] : MCC LEDs
+                                  */
+       uint32_t RESERVED0[1];
+  __I  uint32_t SWITCHES;        /* Offset: 0x00C (R/ )  Denotes the state of the MCC user switches
+                                  *                         [31:8] : Reserved
+                                  *                          [7:0] : These bits indicate state of the MCC switches
+                                  */
+  __I  uint32_t CFG_REG4;        /* Offset: 0x010 (R/ )  Denotes the board revision
+                                  *                         [31:4] : Reserved
+                                  *                          [3:0] : Used by the MCC to pass PCB revision. 0 = A 1 = B
+                                  */
+  __I  uint32_t CFG_ACLK;        /* Offset: 0x014 (R/ )  System Clock
+                                  */
+       uint32_t RESERVED1[34];
+  __IO uint32_t SYS_CFGDATA_RTN; /* Offset: 0x0A0 (R/W)  User data register
+                                  *                         [31:0] : Data
+                                  */
+  __IO uint32_t SYS_CFGDATA_OUT; /* Offset: 0x0A4 (R/W)  User data register
+                                  *                         [31:0] : Data
+                                  */
+  __IO uint32_t SYS_CFGCTRL;     /* Offset: 0x0A8 (R/W)  Control register
+                                  *                           [31] : Start (generates interrupt on write to this bit)
+                                  *                           [30] : R/W access
+                                  *                        [29:26] : Reserved
+                                  *                        [25:20] : Function value
+                                  *                        [19:12] : Reserved
+                                  *                         [11:0] : Device (value of 0/1/2 for supported clocks)
+                                  */
+  __IO uint32_t SYS_CFGSTAT;     /* Offset: 0x0AC (R/W)  Contains status information
+                                  *                         [31:2] : Reserved
+                                  *                            [1] : Error
+                                  *                            [0] : Complete
+                                  */
+  __IO uint32_t RESERVED2[20];
+  __IO uint32_t SCC_DLL;         /* Offset: 0x100 (R/W)  DLL Lock Register
+                                  *                        [31:24] : DLL LOCK MASK[7:0] - Indicates if the DLL lock is masked
+                                  *                        [23:16] : DLL LOCK[7:0]      - Indicates if the DLLs are locked or unlocked
+                                  *                         [15:1] : Reserved
+                                  *                            [0] : This bit indicates if all enabled DLLs are locked
+                                  */
+       uint32_t RESERVED3[957];
+  __I  uint32_t SCC_AID;         /* Offset: 0xFF8 (R/ )  SCC AID Register
+                                  *                        [31:24] : FPGA build number
+                                  *                        [23:20] : V2M-MPS3 target board revision (A = 0, B = 1)
+                                  *                        [19:11] : Reserved
+                                  *                           [10] : if “1” SCC_SW register has been implemented
+                                  *                            [9] : if “1” SCC_LED register has been implemented
+                                  *                            [8] : if “1” DLL lock register has been implemented
+                                  *                          [7:0] : number of SCC configuration register
+                                  */
+  __I  uint32_t SCC_ID;          /* Offset: 0xFFC (R/ )  Contains information about the FPGA image
+                                  *                        [31:24] : Implementer ID: 0x41 = ARM
+                                  *                        [23:20] : Application note IP variant number
+                                  *                        [19:16] : IP Architecture: 0x4 =AHB
+                                  *                         [15:4] : Primary part number: 386 = AN386
+                                  *                          [3:0] : Application note IP revision number
+                                  */
+} MPS3_SCC_TypeDef;
+
+
+/******************************************************************************/
+/*                        SSP Peripheral declaration                          */
+/******************************************************************************/
+
+typedef struct
+{
+  __IO uint32_t CR0;             /* Offset: 0x000 (R/W)  Control register 0
+                                  *                        [31:16] : Reserved
+                                  *                         [15:8] : Serial clock rate
+                                  *                            [7] : SSPCLKOUT phase,    applicable to Motorola SPI frame format only
+                                  *                            [6] : SSPCLKOUT polarity, applicable to Motorola SPI frame format only
+                                  *                          [5:4] : Frame format
+                                  *                          [3:0] : Data Size Select
+                                  */
+  __IO uint32_t CR1;             /* Offset: 0x004 (R/W)  Control register 1
+                                  *                         [31:4] : Reserved
+                                  *                            [3] : Slave-mode output disable
+                                  *                            [2] : Master or slave mode select
+                                  *                            [1] : Synchronous serial port enable
+                                  *                            [0] : Loop back mode
+                                  */
+  __IO uint32_t DR;              /* Offset: 0x008 (R/W)  Data register
+                                  *                        [31:16] : Reserved
+                                  *                         [15:0] : Transmit/Receive FIFO
+                                  */
+  __I  uint32_t SR;              /* Offset: 0x00C (R/ )  Status register
+                                  *                         [31:5] : Reserved
+                                  *                            [4] : PrimeCell SSP busy flag
+                                  *                            [3] : Receive FIFO full
+                                  *                            [2] : Receive FIFO not empty
+                                  *                            [1] : Transmit FIFO not full
+                                  *                            [0] : Transmit FIFO empty
+                                  */
+  __IO uint32_t CPSR;            /* Offset: 0x010 (R/W)  Clock prescale register
+                                  *                         [31:8] : Reserved
+                                  *                          [7:0] : Clock prescale divisor
+                                  */
+  __IO uint32_t IMSC;            /* Offset: 0x014 (R/W)  Interrupt mask set or clear register
+                                  *                         [31:4] : Reserved
+                                  *                            [3] : Transmit FIFO interrupt mask
+                                  *                            [2] : Receive FIFO interrupt mask
+                                  *                            [1] : Receive timeout interrupt mask
+                                  *                            [0] : Receive overrun interrupt mask
+                                  */
+  __I  uint32_t RIS;             /* Offset: 0x018 (R/ )  Raw interrupt status register
+                                  *                         [31:4] : Reserved
+                                  *                            [3] : raw interrupt state, prior to masking, of the SSPTXINTR interrupt
+                                  *                            [2] : raw interrupt state, prior to masking, of the SSPRXINTR interrupt
+                                  *                            [1] : raw interrupt state, prior to masking, of the SSPRTINTR interrupt
+                                  *                            [0] : raw interrupt state, prior to masking, of the SSPRORINTR interrupt
+                                  */
+  __I  uint32_t MIS;             /* Offset: 0x01C (R/ )  Masked interrupt status register
+                                  *                         [31:4] : Reserved
+                                  *                            [3] : transmit FIFO masked interrupt state, after masking, of the SSPTXINTR interrupt
+                                  *                            [2] : receive FIFO masked interrupt state, after masking, of the SSPRXINTR interrupt
+                                  *                            [1] : receive timeout masked interrupt state, after masking, of the SSPRTINTR interrupt
+                                  *                            [0] : receive over run masked interrupt status, after masking, of the SSPRORINTR interrupt
+                                  */
+  __O  uint32_t ICR;             /* Offset: 0x020 ( /W)  Interrupt clear register
+                                  *                         [31:2] : Reserved
+                                  *                            [1] : Clears the SSPRTINTR interrupt
+                                  *                            [0] : Clears the SSPRORINTR interrupt
+                                  */
+  __IO uint32_t DMACR;           /* Offset: 0x024 (R/W)  DMA control register
+                                  *                         [31:2] : Reserved
+                                  *                            [1] : Transmit DMA Enable
+                                  *                            [0] : Receive DMA Enable
+                                  */
+} MPS3_SSP_TypeDef;
+
+
+/* SSP_CR0 Control register 0. */
+#define SSP_CR0_DSS_Pos         0           /* Data Size Select.    */
+#define SSP_CR0_DSS_Msk         (0xF<<SSP_CR0_DSS_Pos)
+#define SSP_CR0_FRF_Pos         4           /* Frame Format Select. */
+#define SSP_CR0_FRF_Msk         (3UL<<SSP_CR0_FRF_Pos)
+#define SSP_CR0_SPO_Pos         6           /* SSPCLKOUT polarity.  */
+#define SSP_CR0_SPO_Msk         (1UL<<SSP_CR0_SPO_Pos)
+#define SSP_CR0_SPH_Pos         7           /* SSPCLKOUT phase.     */
+#define SSP_CR0_SPH_Msk         (1UL<<SSP_CR0_SPH_Pos)
+#define SSP_CR0_SCR_Pos         8           /* Serial Clock Rate (divide). */
+#define SSP_CR0_SCR_Msk         (0xFF<<SSP_CR0_SCR_Pos)
+
+#define SSP_CR0_SCR_DFLT        0x0300      /* Serial Clock Rate (divide), default set at 3. */
+#define SSP_CR0_FRF_MOT         0x0000      /* Frame format.                                 */
+#define SSP_CR0_DSS_8           0x0007      /* Data packet size, 8bits.                      */
+#define SSP_CR0_DSS_16          0x000F      /* Data packet size, 16bits.                     */
+
+/* SSP_CR1 Control register 1. */
+#define SSP_CR1_LBM_Pos         0           /* Loop Back Mode. */
+#define SSP_CR1_LBM_Msk         (1UL<<SSP_CR1_LBM_Pos)
+#define SSP_CR1_SSE_Pos         1           /* Serial port enable. */
+#define SSP_CR1_SSE_Msk         (1UL<<SSP_CR1_SSE_Pos)
+#define SSP_CR1_MS_Pos          2           /* Master or Slave mode. */
+#define SSP_CR1_MS_Msk          (1UL<<SSP_CR1_MS_Pos)
+#define SSP_CR1_SOD_Pos         3           /* Slave Output mode Disable. */
+#define SSP_CR1_SOD_Msk         (1UL<<SSP_CR1_SOD_Pos)
+
+/* SSP_SR Status register. */
+#define SSP_SR_TFE_Pos          0           /* Transmit FIFO empty. */
+#define SSP_SR_TFE_Msk          (1UL<<SSP_SR_TFE_Pos)
+#define SSP_SR_TNF_Pos          1           /* Transmit FIFO not full. */
+#define SSP_SR_TNF_Msk          (1UL<<SSP_SR_TNF_Pos)
+#define SSP_SR_RNE_Pos          2           /* Receive  FIFO not empty. */
+#define SSP_SR_RNE_Msk          (1UL<<SSP_SR_RNE_Pos)
+#define SSP_SR_RFF_Pos          3           /* Receive  FIFO full. */
+#define SSP_SR_RFF_Msk          (1UL<<SSP_SR_RFF_Pos)
+#define SSP_SR_BSY_Pos          4           /* Busy. */
+#define SSP_SR_BSY_Msk          (1UL<<SSP_SR_BSY_Pos)
+
+/* SSP_CPSR Clock prescale register. */
+#define SSP_CPSR_CPD_Pos        0           /* Clock prescale divisor. */
+#define SSP_CPSR_CPD_Msk        (0xFF<<SSP_CPSR_CPD_Pos)
+
+#define SSP_CPSR_DFLT        0x0008      /* Clock prescale (use with SCR), default set at 8. */
+
+/* SSPIMSC Interrupt mask set and clear register. */
+#define SSP_IMSC_RORIM_Pos         0           /* Receive overrun not Masked. */
+#define SSP_IMSC_RORIM_Msk         (1UL<<SSP_IMSC_RORIM_Pos)
+#define SSP_IMSC_RTIM_Pos          1           /* Receive timeout not Masked. */
+#define SSP_IMSC_RTIM_Msk          (1UL<<SSP_IMSC_RTIM_Pos)
+#define SSP_IMSC_RXIM_Pos          2           /* Receive  FIFO not Masked.   */
+#define SSP_IMSC_RXIM_Msk          (1UL<<SSP_IMSC_RXIM_Pos)
+#define SSP_IMSC_TXIM_Pos          3           /* Transmit FIFO not Masked.   */
+#define SSP_IMSC_TXIM_Msk          (1UL<<SSP_IMSC_TXIM_Pos)
+
+/* SSPRIS Raw interrupt status register. */
+#define SSP_RIS_RORRIS_Pos         0           /* Raw Overrun  interrupt flag. */
+#define SSP_RIS_RORRIS_Msk         (1UL<<SSP_RIS_RORRIS_Pos)
+#define SSP_RIS_RTRIS_Pos          1           /* Raw Timeout interrupt flag. */
+#define SSP_RIS_RTRIS_Msk          (1UL<<SSP_RIS_RTRIS_Pos)
+#define SSP_RIS_RXRIS_Pos          2           /* Raw Receive  interrupt flag. */
+#define SSP_RIS_RXRIS_Msk          (1UL<<SSP_RIS_RXRIS_Pos)
+#define SSP_RIS_TXRIS_Pos          3           /* Raw Transmit interrupt flag. */
+#define SSP_RIS_TXRIS_Msk          (1UL<<SSP_RIS_TXRIS_Pos)
+
+/* SSPMIS Masked interrupt status register. */
+#define SSP_MIS_RORMIS_Pos         0           /* Masked Overrun  interrupt flag. */
+#define SSP_MIS_RORMIS_Msk         (1UL<<SSP_MIS_RORMIS_Pos)
+#define SSP_MIS_RTMIS_Pos          1           /* Masked Timeout interrupt flag. */
+#define SSP_MIS_RTMIS_Msk          (1UL<<SSP_MIS_RTMIS_Pos)
+#define SSP_MIS_RXMIS_Pos          2           /* Masked Receive  interrupt flag. */
+#define SSP_MIS_RXMIS_Msk          (1UL<<SSP_MIS_RXMIS_Pos)
+#define SSP_MIS_TXMIS_Pos          3           /* Masked Transmit interrupt flag. */
+#define SSP_MIS_TXMIS_Msk          (1UL<<SSP_MIS_TXMIS_Pos)
+
+/* SSPICR Interrupt clear register. */
+#define SSP_ICR_RORIC_Pos           0           /* Clears Overrun  interrupt flag. */
+#define SSP_ICR_RORIC_Msk           (1UL<<SSP_ICR_RORIC_Pos)
+#define SSP_ICR_RTIC_Pos            1           /* Clears Timeout interrupt flag. */
+#define SSP_ICR_RTIC_Msk            (1UL<<SSP_ICR_RTIC_Pos)
+
+/* SSPDMACR DMA control register. */
+#define SSP_DMACR_RXDMAE_Pos        0           /* Enable Receive  FIFO DMA. */
+#define SSP_DMACR_RXDMAE_Msk        (1UL<<SSP_DMACR_RXDMAE_Pos)
+#define SSP_DMACR_TXDMAE_Pos        1           /* Enable Transmit FIFO DMA. */
+#define SSP_DMACR_TXDMAE_Msk        (1UL<<SSP_DMACR_TXDMAE_Pos)
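
These are the usual PL022-style fields, so transfers can simply be polled against SR. A single-word transfer sketch; the helper name and the choice of SSP0 (declared further down in this header) are illustrative:

    /* Send one word and return the word clocked back in. */
    static uint32_t ssp0_xfer_sketch(uint32_t out)
    {
        while (!(MPS3_SSP0->SR & SSP_SR_TNF_Msk)) {}  /* Wait for TX FIFO space.      */
        MPS3_SSP0->DR = out;                          /* Queue the outgoing word.     */
        while (MPS3_SSP0->SR & SSP_SR_BSY_Msk) {}     /* Wait for the bus to go idle. */
        while (!(MPS3_SSP0->SR & SSP_SR_RNE_Msk)) {}  /* Wait for the received word.  */
        return MPS3_SSP0->DR;
    }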
+
+/******************************************************************************/
+/*               Audio and Touch Screen (I2C) Peripheral declaration          */
+/******************************************************************************/
+
+typedef struct
+{
+  union {
+  __O   uint32_t  CONTROLS;     /* Offset: 0x000 CONTROL Set Register     ( /W). */
+  __I   uint32_t  CONTROL;      /* Offset: 0x000 CONTROL Status Register  (R/ ). */
+  };
+  __O    uint32_t  CONTROLC;     /* Offset: 0x004 CONTROL Clear Register  ( /W). */
+} MPS3_I2C_TypeDef;
+
+#define SDA                (1 << 1)
+#define SCL                (1 << 0)
+
+
+/******************************************************************************/
+/*               Audio I2S Peripheral declaration                             */
+/******************************************************************************/
+
+typedef struct
+{
+  /*!< Offset: 0x000 CONTROL Register    (R/W) */
+  __IO   uint32_t  CONTROL;  /* <h> CONTROL </h>
+                              *   <o.0> TX Enable
+                              *     <0=> TX disabled
+                              *     <1=> TX enabled
+                              *   <o.1> TX IRQ Enable
+                              *     <0=> TX IRQ disabled
+                              *     <1=> TX IRQ enabled
+                              *   <o.2> RX Enable
+                              *     <0=> RX disabled
+                              *     <1=> RX enabled
+                              *   <o.3> RX IRQ Enable
+                              *     <0=> RX IRQ disabled
+                              *     <1=> RX IRQ enabled
+                              *   <o.10..8> TX Buffer Water Level
+                              *     <0=> / IRQ triggers when any space available
+                              *     <1=> / IRQ triggers when more than 1 space available
+                              *     <2=> / IRQ triggers when more than 2 spaces available
+                              *     <3=> / IRQ triggers when more than 3 spaces available
+                              *     <4=> Undefined!
+                              *     <5=> Undefined!
+                              *     <6=> Undefined!
+                              *     <7=> Undefined!
+                              *   <o.14..12> RX Buffer Water Level
+                              *     <0=> Undefined!
+                              *     <1=> / IRQ triggers when less than 1 space available
+                              *     <2=> / IRQ triggers when less than 2 spaces available
+                              *     <3=> / IRQ triggers when less than 3 spaces available
+                              *     <4=> / IRQ triggers when less than 4 spaces available
+                              *     <5=> Undefined!
+                              *     <6=> Undefined!
+                              *     <7=> Undefined!
+                              *   <o.16> FIFO reset
+                              *     <0=> Normal operation
+                              *     <1=> FIFO reset
+                              *   <o.17> Audio Codec reset
+                              *     <0=> Normal operation
+                              *     <1=> Assert audio Codec reset
+                              */
+  /*!< Offset: 0x004 STATUS Register     (R/ ) */
+  __I    uint32_t  STATUS;   /* <h> STATUS </h>
+                              *   <o.0> TX Buffer alert
+                              *     <0=> TX buffer doesn't need service yet
+                              *     <1=> TX buffer needs service
+                              *   <o.1> RX Buffer alert
+                              *     <0=> RX buffer doesn't need service yet
+                              *     <1=> RX buffer needs service
+                              *   <o.2> TX Buffer Empty
+                              *     <0=> TX buffer has data
+                              *     <1=> TX buffer empty
+                              *   <o.3> TX Buffer Full
+                              *     <0=> TX buffer not full
+                              *     <1=> TX buffer full
+                              *   <o.4> RX Buffer Empty
+                              *     <0=> RX buffer has data
+                              *     <1=> RX buffer empty
+                              *   <o.5> RX Buffer Full
+                              *     <0=> RX buffer not full
+                              *     <1=> RX buffer full
+                              */
+  union {
+   /*!< Offset: 0x008 Error Status Register (R/ ) */
+    __I    uint32_t  ERROR;  /* <h> ERROR </h>
+                              *   <o.0> TX error
+                              *     <0=> Okay
+                              *     <1=> TX overrun/underrun
+                              *   <o.1> RX error
+                              *     <0=> Okay
+                              *     <1=> RX overrun/underrun
+                              */
+   /*!< Offset: 0x008 Error Clear Register  ( /W) */
+    __O    uint32_t  ERRORCLR; /* <h> ERRORCLR </h>
+                                *   <o.0> TX error
+                                *     <0=> Okay
+                                *     <1=> Clear TX error
+                                *   <o.1> RX error
+                                *     <0=> Okay
+                                *     <1=> Clear RX error
+                                */
+    };
+   /*!< Offset: 0x00C Divide ratio Register (R/W) */
+  __IO   uint32_t  DIVIDE;  /* <h> Divide ratio for Left/Right clock </h>
+                             *   <o.9..0> Divide ratio (default 0x80)
+                             */
+   /*!< Offset: 0x010 Transmit Buffer       ( /W) */
+  __O    uint32_t  TXBUF;  /* <h> Transmit buffer </h>
+                            *   <o.15..0> Right channel
+                            *   <o.31..16> Left channel
+                            */
+
+   /*!< Offset: 0x014 Receive Buffer        (R/ ) */
+  __I    uint32_t  RXBUF;  /* <h> Receive buffer </h>
+                            *   <o.15..0> Right channel
+                            *   <o.31..16> Left channel
+                            */
+         uint32_t  RESERVED1[186];
+  __IO uint32_t ITCR;        /* <h> Integration Test Control Register </h>
+                              *   <o.0> ITEN
+                              *     <0=> Normal operation
+                              *     <1=> Integration Test mode enable
+                              */
+  __O  uint32_t ITIP1;       /* <h> Integration Test Input Register 1</h>
+                              *   <o.0> SDIN
+                              */
+  __O  uint32_t ITOP1;       /* <h> Integration Test Output Register 1</h>
+                              *   <o.0> SDOUT
+                              *   <o.1> SCLK
+                              *   <o.2> LRCK
+                              *   <o.3> IRQOUT
+                              */
+} MPS3_I2S_TypeDef;
+
+#define I2S_CONTROL_TXEN_Pos        0
+#define I2S_CONTROL_TXEN_Msk        (1UL<<I2S_CONTROL_TXEN_Pos)
+
+#define I2S_CONTROL_TXIRQEN_Pos     1
+#define I2S_CONTROL_TXIRQEN_Msk     (1UL<<I2S_CONTROL_TXIRQEN_Pos)
+
+#define I2S_CONTROL_RXEN_Pos        2
+#define I2S_CONTROL_RXEN_Msk        (1UL<<I2S_CONTROL_RXEN_Pos)
+
+#define I2S_CONTROL_RXIRQEN_Pos     3
+#define I2S_CONTROL_RXIRQEN_Msk     (1UL<<I2S_CONTROL_RXIRQEN_Pos)
+
+#define I2S_CONTROL_TXWLVL_Pos      8
+#define I2S_CONTROL_TXWLVL_Msk      (7UL<<I2S_CONTROL_TXWLVL_Pos)
+
+#define I2S_CONTROL_RXWLVL_Pos      12
+#define I2S_CONTROL_RXWLVL_Msk      (7UL<<I2S_CONTROL_RXWLVL_Pos)
+/* FIFO reset. */
+#define I2S_CONTROL_FIFORST_Pos     16
+#define I2S_CONTROL_FIFORST_Msk     (1UL<<I2S_CONTROL_FIFORST_Pos)
+/* Codec reset. */
+#define I2S_CONTROL_CODECRST_Pos    17
+#define I2S_CONTROL_CODECRST_Msk    (1UL<<I2S_CONTROL_CODECRST_Pos)
+
+#define I2S_STATUS_TXIRQ_Pos        0
+#define I2S_STATUS_TXIRQ_Msk        (1UL<<I2S_STATUS_TXIRQ_Pos)
+
+#define I2S_STATUS_RXIRQ_Pos        1
+#define I2S_STATUS_RXIRQ_Msk        (1UL<<I2S_STATUS_RXIRQ_Pos)
+
+#define I2S_STATUS_TXEmpty_Pos      2
+#define I2S_STATUS_TXEmpty_Msk      (1UL<<I2S_STATUS_TXEmpty_Pos)
+
+#define I2S_STATUS_TXFull_Pos       3
+#define I2S_STATUS_TXFull_Msk       (1UL<<I2S_STATUS_TXFull_Pos)
+
+#define I2S_STATUS_RXEmpty_Pos      4
+#define I2S_STATUS_RXEmpty_Msk      (1UL<<I2S_STATUS_RXEmpty_Pos)
+
+#define I2S_STATUS_RXFull_Pos       5
+#define I2S_STATUS_RXFull_Msk       (1UL<<I2S_STATUS_RXFull_Pos)
+
+#define I2S_ERROR_TXERR_Pos         0
+#define I2S_ERROR_TXERR_Msk         (1UL<<I2S_ERROR_TXERR_Pos)
+
+#define I2S_ERROR_RXERR_Pos         1
+#define I2S_ERROR_RXERR_Msk         (1UL<<I2S_ERROR_RXERR_Pos)
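
A sketch of programming the I2S CONTROL register with these fields; MPS3_AAIC_I2S is declared further down in this header and the TX water level of 2 is an illustrative choice:

    /* Enable the I2S transmit path with a TX FIFO water-level interrupt. */
    static void i2s_tx_enable_sketch(void)
    {
        uint32_t ctrl = MPS3_AAIC_I2S->CONTROL;

        ctrl &= ~I2S_CONTROL_TXWLVL_Msk;              /* Clear the old water level.    */
        ctrl |= (2UL << I2S_CONTROL_TXWLVL_Pos);      /* IRQ when > 2 spaces are free. */
        ctrl |= I2S_CONTROL_TXEN_Msk | I2S_CONTROL_TXIRQEN_Msk;

        MPS3_AAIC_I2S->CONTROL = ctrl;
    }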
+
+/******************************************************************************/
+/*                       SMSC9220 Register Definitions                        */
+/******************************************************************************/
+
+typedef struct                         /*   SMSC LAN9220                                  */
+{
+__I   uint32_t  RX_DATA_PORT;          /*   Receive FIFO Ports (offset 0x0).              */
+      uint32_t  RESERVED1[0x7];
+__O   uint32_t  TX_DATA_PORT;          /*   Transmit FIFO Ports (offset 0x20).            */
+      uint32_t  RESERVED2[0x7];
+
+__I   uint32_t  RX_STAT_PORT;          /*   Receive FIFO status port (offset 0x40).       */
+__I   uint32_t  RX_STAT_PEEK;          /*   Receive FIFO status peek (offset 0x44).       */
+__I   uint32_t  TX_STAT_PORT;          /*   Transmit FIFO status port (offset 0x48).      */
+__I   uint32_t  TX_STAT_PEEK;          /*   Transmit FIFO status peek (offset 0x4C).      */
+
+__I   uint32_t  ID_REV;                /*   Chip ID and Revision (offset 0x50).           */
+__IO  uint32_t  IRQ_CFG;               /*   Main Interrupt Configuration (offset 0x54).   */
+__IO  uint32_t  INT_STS;               /*   Interrupt Status (offset 0x58).               */
+__IO  uint32_t  INT_EN;                /*   Interrupt Enable Register (offset 0x5C).      */
+      uint32_t  RESERVED3;             /*   Reserved for future use (offset 0x60).        */
+__I   uint32_t  BYTE_TEST;             /*   Read-only byte order testing register 87654321h (offset 0x64). */
+__IO  uint32_t  FIFO_INT;              /*   FIFO Level Interrupts (offset 0x68).          */
+__IO  uint32_t  RX_CFG;                /*   Receive Configuration (offset 0x6C).          */
+__IO  uint32_t  TX_CFG;                /*   Transmit Configuration (offset 0x70).         */
+__IO  uint32_t  HW_CFG;                /*   Hardware Configuration (offset 0x74).         */
+__IO  uint32_t  RX_DP_CTL;             /*   RX Datapath Control (offset 0x78).            */
+__I   uint32_t  RX_FIFO_INF;           /*   Receive FIFO Information (offset 0x7C).       */
+__I   uint32_t  TX_FIFO_INF;           /*   Transmit FIFO Information (offset 0x80).      */
+__IO  uint32_t  PMT_CTRL;              /*   Power Management Control (offset 0x84).       */
+__IO  uint32_t  GPIO_CFG;              /*   General Purpose IO Configuration (offset 0x88). */
+__IO  uint32_t  GPT_CFG;               /*   General Purpose Timer Configuration (offset 0x8C). */
+__I   uint32_t  GPT_CNT;               /*   General Purpose Timer Count (offset 0x90).    */
+      uint32_t  RESERVED4;             /*   Reserved for future use (offset 0x94).        */
+__IO  uint32_t  ENDIAN;                /*   WORD SWAP Register (offset 0x98).             */
+__I   uint32_t  FREE_RUN;              /*   Free Run Counter (offset 0x9C).               */
+__I   uint32_t  RX_DROP;               /*   RX Dropped Frames Counter (offset 0xA0).      */
+__IO  uint32_t  MAC_CSR_CMD;           /*   MAC CSR Synchronizer Command (offset 0xA4).   */
+__IO  uint32_t  MAC_CSR_DATA;          /*   MAC CSR Synchronizer Data (offset 0xA8).      */
+__IO  uint32_t  AFC_CFG;               /*   Automatic Flow Control Configuration (offset 0xAC). */
+__IO  uint32_t  E2P_CMD;               /*   EEPROM Command (offset 0xB0).                 */
+__IO  uint32_t  E2P_DATA;              /*   EEPROM Data (offset 0xB4).                    */
+
+} SMSC9220_TypeDef;
+
+/* SMSC9220 MAC Registers       Indices. */
+#define SMSC9220_MAC_CR         0x1
+#define SMSC9220_MAC_ADDRH      0x2
+#define SMSC9220_MAC_ADDRL      0x3
+#define SMSC9220_MAC_HASHH      0x4
+#define SMSC9220_MAC_HASHL      0x5
+#define SMSC9220_MAC_MII_ACC    0x6
+#define SMSC9220_MAC_MII_DATA   0x7
+#define SMSC9220_MAC_FLOW       0x8
+#define SMSC9220_MAC_VLAN1      0x9
+#define SMSC9220_MAC_VLAN2      0xA
+#define SMSC9220_MAC_WUFF       0xB
+#define SMSC9220_MAC_WUCSR      0xC
+
+/* SMSC9220 PHY Registers       Indices. */
+#define SMSC9220_PHY_BCONTROL   0x0
+#define SMSC9220_PHY_BSTATUS    0x1
+#define SMSC9220_PHY_ID1        0x2
+#define SMSC9220_PHY_ID2        0x3
+#define SMSC9220_PHY_ANEG_ADV   0x4
+#define SMSC9220_PHY_ANEG_LPA   0x5
+#define SMSC9220_PHY_ANEG_EXP   0x6
+#define SMSC9220_PHY_MCONTROL   0x17
+#define SMSC9220_PHY_MSTATUS    0x18
+#define SMSC9220_PHY_CSINDICATE 0x27
+#define SMSC9220_PHY_INTSRC     0x29
+#define SMSC9220_PHY_INTMASK    0x30
+#define SMSC9220_PHY_CS         0x31
+
+/******************************************************************************/
+/*                         Peripheral declaration                             */
+/******************************************************************************/
+
+#define MPS3_TS_I2C             ((MPS3_I2C_TypeDef      *) MPS3_I2C0_BASE )
+#define MPS3_AAIC_I2C           ((MPS3_I2C_TypeDef      *) MPS3_I2C1_BASE )
+#define MPS3_CAM_I2C2           ((MPS3_I2C_TypeDef      *) MPS3_I2C2_BASE )
+#define MPS3_CAM_I2C3           ((MPS3_I2C_TypeDef      *) MPS3_I2C3_BASE )
+#define MPS3_AAIC_I2S           ((MPS3_I2S_TypeDef      *) MPS3_AAIC_I2S_BASE )
+#define MPS3_FPGAIO             ((MPS3_FPGAIO_TypeDef   *) MPS3_FPGAIO_BASE )
+#define MPS3_SCC                ((MPS3_SCC_TypeDef      *) MPS3_SCC_BASE )
+#define MPS3_SSP0               ((MPS3_SSP_TypeDef      *) MPS3_SSP0_BASE )
+#define MPS3_SSP1               ((MPS3_SSP_TypeDef      *) MPS3_SSP1_BASE )
+#define MPS3_SSP2               ((MPS3_SSP_TypeDef      *) MPS3_SSP2_BASE )
+#define MPS3_SSP3               ((MPS3_SSP_TypeDef      *) MPS3_SSP3_BASE )
+#define MPS3_SSP4               ((MPS3_SSP_TypeDef      *) MPS3_SSP4_BASE )
+#define SMSC9220                ((SMSC9220_TypeDef      *) SMSC9220_BASE)
+
+/******************************************************************************/
+/*                      Secure Peripheral declaration                         */
+/******************************************************************************/
+
+#define SEC_TS_I2C             ((MPS3_I2C_TypeDef      *) SEC_MPS3_I2C0_BASE )
+#define SEC_AAIC_I2C           ((MPS3_I2C_TypeDef      *) SEC_MPS3_I2C1_BASE )
+#define SEC_AAIC_I2S           ((MPS3_I2S_TypeDef      *) SEC_MPS3_AAIC_I2S_BASE )
+#define SEC_FPGAIO             ((MPS3_FPGAIO_TypeDef   *) SEC_MPS3_FPGAIO_BASE )
+#define SEC_SCC                ((MPS3_SCC_TypeDef      *) SEC_MPS3_SCC_BASE )
+#define SEC_SSP0               ((MPS3_SSP_TypeDef      *) SEC_SSP0_BASE )
+#define SEC_SSP1               ((MPS3_SSP_TypeDef      *) SEC_SSP1_BASE )
+#define SEC_SSP2               ((MPS3_SSP_TypeDef      *) SEC_MPS3_SSP2_BASE )
+#define SEC_SMSC9220           ((SMSC9220_TypeDef      *) SEC_SMSC9220_BASE)
+
+#endif /* SMM_MPS3_H */
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/timer_mps3.h b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/timer_mps3.h
new file mode 100644
index 0000000..14d64e5
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/include/timer_mps3.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef TIMER_MPS3_H
+#define TIMER_MPS3_H
+
+#include <stdint.h>
+#include <time.h>
+
+/* Container for timestamp up-counters. */
+typedef struct _mps3_time_counter {
+    uint32_t    counter_1Hz;
+    uint32_t    counter_100Hz;
+
+    /* Running at FPGA clock rate. See GetMPS3CoreClock(). */
+    uint32_t    counter_fpga;
+
+    /* Running at processor core's internal clock rate, triggered by SysTick. */
+    uint64_t    counter_systick;
+} mps3_time_counter;
+
+/**
+ * @brief   Resets the counters.
+ */
+void timer_reset(void);
+
+/**
+ * @brief   Gets the current counter values.
+ * @returns MPS3 timer counter.
+ **/
+mps3_time_counter get_time_counter(void);
+
+/**
+ * @brief       Gets the duration elapsed between two counters in milliseconds.
+ * @param[in]   start   Pointer to mps3_time_counter value at start time.
+ * @param[in]   end     Pointer to mps3_time_counter value at end.
+ * @returns     Difference in milliseconds between the two given counters,
+ *              expressed as an unsigned integer.
+ **/
+uint32_t get_duration_milliseconds(mps3_time_counter *start,
+                                   mps3_time_counter *end);
+
+/**
+ * @brief       Gets the duration elapsed between two counters in microseconds.
+ * @param[in]   start   Pointer to mps3_time_counter value at start time.
+ * @param[in]   end     Pointer to mps3_time_counter value at end.
+ * @returns     Difference in microseconds between the two given counters,
+ *              expressed as an unsigned integer.
+ **/
+uint32_t get_duration_microseconds(mps3_time_counter *start,
+                                   mps3_time_counter *end);
+
+/**
+ * @brief       Gets the cycle counts elapsed between start and end.
+ * @param[in]   start   Pointer to mps3_time_counter value at start time.
+ * @param[in]   end     Pointer to mps3_time_counter value at end.
+ * @return      Difference in counter values as a 64-bit unsigned integer.
+ **/
+uint64_t get_cycle_count_diff(mps3_time_counter *start,
+                              mps3_time_counter *end);
+
+/**
+ * @brief   Enables or triggers cycle counting mechanism, if required
+ *          by the platform.
+ **/
+void start_cycle_counter(void);
+
+/**
+ * @brief   Stops cycle counting mechanism, if required by the platform.
+ **/
+void stop_cycle_counter(void);
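+
+/*
+ * Illustrative usage sketch (not part of the API contract; `do_work()` is a
+ * hypothetical placeholder):
+ *
+ *     timer_reset();
+ *     mps3_time_counter start = get_time_counter();
+ *     do_work();
+ *     mps3_time_counter end = get_time_counter();
+ *     uint32_t duration_ms = get_duration_milliseconds(&start, &end);
+ */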
+
+#endif /* TIMER_MPS3_H */
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/timer_mps3.c b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/timer_mps3.c
new file mode 100644
index 0000000..0a3a8b1
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/timer_mps3.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "timer_mps3.h"
+
+#include "bsp_core_log.h"
+#include "device_mps3.h"
+
+void timer_reset(void)
+{
+    MPS3_FPGAIO->CLK1HZ   = 0;
+    MPS3_FPGAIO->CLK100HZ = 0;
+    MPS3_FPGAIO->COUNTER  = 0;
+
+    if (0 != Init_SysTick()) {
+        printf_err("Failed to initialise system tick config\n");
+    }
+    debug("system tick config ready\n");
+}
+
+mps3_time_counter get_time_counter(void)
+{
+    mps3_time_counter t = {
+        .counter_1Hz        = MPS3_FPGAIO->CLK1HZ,
+        .counter_100Hz      = MPS3_FPGAIO->CLK100HZ,
+        .counter_fpga       = MPS3_FPGAIO->COUNTER,
+        .counter_systick    = Get_SysTick_Cycle_Count()
+    };
+    debug("Timestamp:\
+        \n\tCounter 1 Hz:   %u\
+        \n\tCounter 100 Hz: %u\
+        \n\tCounter FPGA:   %u\
+        \n\tCounter CPU:    %llu\n",
+        t.counter_1Hz, t.counter_100Hz, t.counter_fpga, t.counter_systick);
+    return t;
+}
+
+/**
+ * Note: this function does not check for counter overflow. If the elapsed
+ * time is very large (in the order of days), the counters can wrap and the
+ * result will be incorrect; this case is currently not handled.
+ **/
+uint32_t get_duration_milliseconds(mps3_time_counter *start,
+                                   mps3_time_counter *end)
+{
+    uint32_t time_elapsed = 0;
+    if (end->counter_100Hz > start->counter_100Hz) {
+        time_elapsed = (end->counter_100Hz - start->counter_100Hz) * 10;
+    } else {
+        time_elapsed = (end->counter_1Hz - start->counter_1Hz) * 1000 +
+            ((0xFFFFFFFF - start->counter_100Hz) + end->counter_100Hz + 1) * 10;
+    }
+
+    /* If the time elapsed is less than 100 ms, use the microsecond count
+     * for better precision. */
+    if (time_elapsed < 100) {
+        debug("Using the microsecond function instead...\n");
+        return get_duration_microseconds(start, end)/1000;
+    }
+
+    return time_elapsed;
+}
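+
+/*
+ * Worked example (illustrative): if the 100 Hz counter wrapped between the two
+ * samples, e.g. start->counter_100Hz = 0xFFFFFFFE and end->counter_100Hz = 1,
+ * the wrap-around branch above yields ((0xFFFFFFFF - 0xFFFFFFFE) + 1 + 1) * 10
+ * = 30 ms, added to (end->counter_1Hz - start->counter_1Hz) * 1000.
+ */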
+
+/**
+ * Like the millisecond counterpart, this function can return incorrect
+ * results when the counter (MAINCLK) overflows. There are no overflow
+ * counters available.
+ **/
+uint32_t get_duration_microseconds(mps3_time_counter *start,
+                                   mps3_time_counter *end)
+{
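+    /* The FPGA counter runs at the MPS3 core clock rate; for example a
+     * 32 MHz clock (illustrative) gives 32 counter ticks per microsecond. */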
+    const int divisor = GetMPS3CoreClock()/1000000;
+    uint32_t time_elapsed = 0;
+    if (end->counter_fpga > start->counter_fpga) {
+        time_elapsed = (end->counter_fpga - start->counter_fpga)/divisor;
+    } else {
+        time_elapsed = ((0xFFFFFFFF - start->counter_fpga)
+            + end->counter_fpga + 1)/divisor;
+    }
+    return time_elapsed;
+}
+
+uint64_t get_cycle_count_diff(mps3_time_counter *start,
+                              mps3_time_counter *end)
+{
+    if (start->counter_systick > end->counter_systick) {
+        warn("start > end; counter might have overflown\n");
+    }
+    return end->counter_systick - start->counter_systick;
+}
+
+void start_cycle_counter(void)
+{
+    /* Nothing to do for FPGA */
+}
+
+void stop_cycle_counter(void)
+{
+    /* Nothing to do for FPGA */
+}
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/uart_stdout.c b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/uart_stdout.c
new file mode 100644
index 0000000..1bf8291
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/mps3/uart_stdout.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "uart_stdout.h"
+
+#include "device_mps3.h"
+
+#include <stdio.h>
+
+#define CNTLQ       0x11
+#define CNTLS       0x13
+#define DEL         0x7F
+#define BACKSPACE   0x08
+#define CR          0x0D
+#define LF          0x0A
+#define ESC         0x1B
+
+void UartStdOutInit(void)
+{
+    /* NOTE: SystemCoreClock should have been set before initialising UART. */
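+    /* Worked example (illustrative): at 25 MHz this gives
+     * 25000000 / 115200 = 217 (integer division), i.e. ~115.2 kbps. */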
+    CMSDK_UART0->BAUDDIV = SystemCoreClock / 115200;   /* => (25 or 32 MHz) / (115200 bps). */
+    CMSDK_UART0->CTRL    = ((1ul <<  0) |              /* TX enable. */
+                            (1ul <<  1) );             /* RX enable. */
+    return;
+}
+
+unsigned char UartPutc(unsigned char my_ch)
+{
+    while ((CMSDK_UART0->STATE & 1)); /* Wait if Transmit Holding register is full. */
+
+    if (my_ch == '\n') {
+        CMSDK_UART0->DATA  = '\r';
+        while ((CMSDK_UART0->STATE & 1)); /* Wait if Transmit Holding register is full. */
+    }
+
+    CMSDK_UART0->DATA = my_ch; /* Write to transmit holding register. */
+    return (my_ch);
+}
+
+unsigned char UartGetc(void)
+{
+    unsigned char my_ch;
+    unsigned int  cnt;
+
+    /* Wait if Receive Holding register is empty. */
+    while (0 == (CMSDK_UART0->STATE & 2)) {
+        cnt = MPS3_FPGAIO->CLK100HZ / 50;
+        if (cnt & 0x8) {
+            MPS3_FPGAIO->LED = 0x01 << (cnt & 0x7);
+        }
+        else {
+            MPS3_FPGAIO->LED = 0x80 >> (cnt & 0x7);
+        }
+    }
+
+    my_ch = CMSDK_UART0->DATA;
+
+    /* Convert CR to LF. */
+    if(my_ch == '\r') {
+        my_ch = '\n';
+    }
+
+    return (my_ch);
+}
+
+bool GetLine(char *lp, unsigned int len)
+{
+    unsigned int cnt = 0;
+    char c;
+
+    do {
+        c = UartGetc ();
+        switch (c) {
+            case CNTLQ:                       /* Ignore Control S/Q.            */
+            case CNTLS:
+                break;
+
+            case BACKSPACE:
+            case DEL:
+                if (cnt == 0) {
+                    break;
+                }
+                cnt--;                         /* Decrement count.               */
+                lp--;                          /* Decrement line pointer.        */
+                UartPutc (0x08);               /* Echo backspace.                */
+                UartPutc (' ');
+                UartPutc (0x08);
+                fflush (stdout);
+                break;
+
+            case ESC:
+            case 0:
+                *lp = 0;                        /* ESC - stop editing line.       */
+                return false;
+
+            case CR:                            /* CR - done, stop editing line.  */
+                *lp = c;
+                lp++;                           /* Increment line pointer         */
+                cnt++;                          /* and count.                     */
+                c = LF;
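+                /* Fall through: echo and store the character. */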
+            default:
+                UartPutc (*lp = c);             /* Echo and store character.      */
+                fflush (stdout);
+                lp++;                           /* Increment line pointer         */
+                cnt++;                          /* and count.                     */
+                break;
+        }
+    } while (cnt < len - 2  &&  c != LF);       /* Check limit and CR.            */
+    *lp = 0;                                    /* Mark end of string.            */
+
+    return true;
+}
+
+void UartEndSimulation(int code)
+{
+    UartPutc((char) 0x4);   /* End of simulation */
+    UartPutc((char) code);  /* Exit code */
+    while(1);
+}
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/include/stubs_fvp.h b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/include/stubs_fvp.h
new file mode 100644
index 0000000..a21f2d2
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/include/stubs_fvp.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef BSP_PACK_FASTMODEL_H
+#define BSP_PACK_FASTMODEL_H
+
+#include "cmsis.h"                  /* device specific header file    */
+#include "peripheral_memmap.h"      /* peripheral memory map definitions */
+
+/****************************************************************************/
+/*              Definitions and stub functions for modules currently        */
+/*              unavailable on the model                                    */
+/****************************************************************************/
+#define GLCD_WIDTH      320
+#define GLCD_HEIGHT     240
+#define Black           0x0000      /*   0,   0,   0 */
+#define White           0xFFFF      /* 255, 255, 255 */
+
+/*********************** Clock related functions *****************************/
+uint32_t GetCoreClock(void);
+
+/************************  GLCD related functions ****************************/
+/**
+ * @brief      Initialize the Himax LCD with HX8347-D LCD Controller
+ * @return     none
+ */
+void GLCD_Initialize(void);
+
+/**
+ * @brief      Display graphical bitmap image at position x horizontally and y
+ *             vertically. This function is optimized for 16 bits per pixel
+ *             format; it has to be adapted for any other format.
+ * @param[in]  x        horizontal position.
+ * @param[in]  y        vertical position.
+ * @param[in]  w        width of bitmap.
+ * @param[in]  h        height of bitmap.
+ * @param[in]  bitmap   address at which the bitmap data resides.
+ * @return     none
+ */
+void GLCD_Bitmap(unsigned int x,  unsigned int y,
+                unsigned int w, unsigned int h,
+                unsigned short *bitmap);
+
+/**
+ * @brief Displays an 8 bit image; conversion to the LCD's
+ *        16 bit codec is done on the fly.
+ * @param[in] data      pointer to the full sized image data.
+ * @param[in] width     image width.
+ * @param[in] height    image height.
+ * @param[in] channels  number of channels in the image.
+ * @param[in] pos_x     start x position for the LCD.
+ * @param[in] pos_y     start y position for the LCD.
+ * @param[in] downsample_factor   factor by which the image
+ *                                is downsampled.
+ * @return none
+ */
+void GLCD_Image(void *data, const uint32_t width,
+                const uint32_t height, const uint32_t channels,
+                const uint32_t pos_x, const uint32_t pos_y,
+                const uint32_t downsample_factor);
+
+/**
+ * @brief      Clear display
+ * @param[in]  color    display clearing color
+ * @return     none
+ */
+void GLCD_Clear(unsigned short color);
+
+/**
+ * @brief      Set foreground color
+ * @param[in]  color    foreground color
+ * @return     none
+ */
+void GLCD_SetTextColor(unsigned short color);
+
+/**
+ * @brief      Display character on given line
+ * @param[in]  ln    line number
+ * @param[in]  col   column number
+ * @param[in]  fi    font index (0 = 9x15)
+ * @param[in]  c     ASCII character
+ * @return     none
+ */
+void GLCD_DisplayChar(unsigned int ln, unsigned int col,
+                    unsigned char fi, unsigned char  c);
+
+/**
+ * @brief      Display string on given line
+ * @param[in]  ln    line number
+ * @param[in]  col   column number
+ * @param[in]  fi    font index (0 = 9x15)
+ * @param[in]  s     pointer to string
+ * @return     none
+ */
+void GLCD_DisplayString(unsigned int ln, unsigned int col,
+                        unsigned char fi, char *s);
+
+/**
+ * @brief      Draw box filled with color
+ * @param[in]  x        horizontal position
+ * @param[in]  y        vertical position
+ * @param[in]  w        window width in pixels
+ * @param[in]  h        window height in pixels
+ * @param[in]  color    box color
+ * @return     none
+ */
+void GLCD_Box(unsigned int x, unsigned int y,
+            unsigned int w, unsigned int h,
+            unsigned short color);
+
+#endif /* BSP_PACK_FASTMODEL_H */
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/include/timer_fvp.h b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/include/timer_fvp.h
new file mode 100644
index 0000000..c07a4eb
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/include/timer_fvp.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef TIMER_FVP_H
+#define TIMER_FVP_H
+
+#include "stubs_fvp.h"
+
+/* Container for timestamp for fastmodel. */
+typedef struct _fvp_time_counter {
+    uint64_t    counter_systick;
+} fvp_time_counter;
+
+/**
+ * @brief   Resets the counters.
+ */
+void timer_reset(void);
+
+/**
+ * @brief   Gets the current counter values.
+ * @returns counter struct.
+ **/
+fvp_time_counter get_time_counter(void);
+
+/**
+ * @brief   Gets the cycle counts elapsed between start and end.
+ * @return  difference in counter values as a 64-bit unsigned integer.
+ */
+uint64_t get_cycle_count_diff(fvp_time_counter *start, fvp_time_counter *end);
+
+/**
+ * @brief   Enables or triggers cycle counting mechanism, if required
+ *          by the platform.
+ */
+void start_cycle_counter(void);
+
+/**
+ * @brief   Stops cycle counting mechanism, if required by the platform.
+ */
+void stop_cycle_counter(void);
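+
+/*
+ * Illustrative usage sketch (not part of the API contract; `do_work()` is a
+ * hypothetical placeholder):
+ *
+ *     timer_reset();
+ *     start_cycle_counter();
+ *     fvp_time_counter begin = get_time_counter();
+ *     do_work();
+ *     fvp_time_counter finish = get_time_counter();
+ *     stop_cycle_counter();
+ *     uint64_t cycles = get_cycle_count_diff(&begin, &finish);
+ */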
+
+#endif /* TIMER_FVP_H */
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/stubs_fvp.c b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/stubs_fvp.c
new file mode 100644
index 0000000..e5b2969
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/stubs_fvp.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "stubs_fvp.h"
+
+#include "bsp_core_log.h"
+
+uint32_t GetCoreClock(void)
+{
+    return 1;
+}
+
+void GLCD_Initialize(void) {}
+
+void GLCD_Bitmap(unsigned int x,  unsigned int y,
+    unsigned int w, unsigned int h, unsigned short *bitmap)
+{
+    UNUSED(x);
+    UNUSED(y);
+    UNUSED(w);
+    UNUSED(h);
+    UNUSED(bitmap);
+}
+
+void GLCD_Image(void *data, const uint32_t width, const uint32_t height,
+    const uint32_t channels, const uint32_t pos_x,
+    const uint32_t pos_y, const uint32_t downsample_factor)
+{
+    UNUSED(data);
+    UNUSED(pos_x);
+    UNUSED(pos_y);
+    UNUSED(width);
+    UNUSED(height);
+    UNUSED(channels);
+    UNUSED(downsample_factor);
+    debug("image display: (x, y, w, h) = (%u, %u, %u, %u)\n",
+        pos_x, pos_y, width, height);
+    debug("image display: channels = %u, downsample factor = %u\n",
+        channels, downsample_factor);
+}
+
+void GLCD_Clear(unsigned short color)
+{
+    UNUSED(color);
+}
+
+void GLCD_SetTextColor(unsigned short color)
+{
+    UNUSED(color);
+}
+
+void GLCD_DisplayChar (unsigned int ln, unsigned int col, unsigned char fi,
+    unsigned char c)
+{
+    UNUSED(ln);
+    UNUSED(col);
+    UNUSED(fi);
+    UNUSED(c);
+}
+
+void GLCD_DisplayString(unsigned int ln, unsigned int col, unsigned char fi,
+    char *s)
+{
+    UNUSED(ln);
+    UNUSED(col);
+    UNUSED(fi);
+    UNUSED(s);
+    debug("text display: %s\n", s);
+}
+
+void GLCD_Box(unsigned int x, unsigned int y, unsigned int w, unsigned int h,
+    unsigned short color)
+{
+    UNUSED(x);
+    UNUSED(y);
+    UNUSED(w);
+    UNUSED(h);
+    UNUSED(color);
+}
+
+void LED_Initialize(uint32_t port)
+{
+    UNUSED(port);
+}
+
+void LED_On(uint32_t num, uint32_t port)
+{
+    UNUSED(num);
+    UNUSED(port);
+    debug("LED %u ON\n", num);
+}
+
+void LED_Off(uint32_t num, uint32_t port)
+{
+    UNUSED(num);
+    UNUSED(port);
+    debug("LED %u OFF\n", num);
+}
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/timer_fvp.c b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/timer_fvp.c
new file mode 100644
index 0000000..b7a7232
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/timer_fvp.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "timer_fvp.h"
+
+#include "irqs.h"
+#include "bsp_core_log.h"
+
+fvp_time_counter get_time_counter(void)
+{
+    fvp_time_counter t = {
+        .counter_systick = Get_SysTick_Cycle_Count()
+    };
+    debug("counter_systick: %llu\n", t.counter_systick);
+    return t;
+}
+
+void timer_reset(void)
+{
+    if (0 != Init_SysTick()) {
+        printf_err("Failed to initialise system tick config\n");
+    }
+    debug("system tick config ready\n");
+}
+
+uint64_t get_cycle_count_diff(fvp_time_counter *start,
+                              fvp_time_counter *end)
+{
+    if (start->counter_systick > end->counter_systick) {
+        warn("start > end; counter might have overflown\n");
+    }
+    return end->counter_systick - start->counter_systick;
+}
+
+void start_cycle_counter(void)
+{
+    /* Add any custom requirement for this platform here */
+}
+
+void stop_cycle_counter(void)
+{
+    /* Add any custom requirement for this platform here */
+}
diff --git a/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/uart_pl011.c b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/uart_pl011.c
new file mode 100644
index 0000000..5c1ee06
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/bsp-packs/simple_platform/uart_pl011.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "uart_stdout.h"
+#include "peripheral_memmap.h"      /* peripheral memory map definitions */
+
+#include <stdio.h>
+#include <stdint.h>
+
+#define CNTLQ       0x11
+#define CNTLS       0x13
+#define DEL         0x7F
+#define BACKSPACE   0x08
+#define CR          0x0D
+#define LF          0x0A
+#define ESC         0x1B
+
+#define UARTBASE    (PL011_UART0_BASE)
+
+/*****************************************************************************/
+/*  UART Control Register Locations                                          */
+/*****************************************************************************/
+#define UART0_DR   *((volatile unsigned *) UARTBASE)
+#define UART0_RSR  *((volatile unsigned *)(UARTBASE + 0x04))
+#define UART0_ECR  *((volatile unsigned *)(UARTBASE + 0x04))
+#define UART0_LCRH *((volatile unsigned *)(UARTBASE + 0x2C))
+#define UART0_LCRM *((volatile unsigned *)(UARTBASE + 0x28))
+#define UART0_LCRL *((volatile unsigned *)(UARTBASE + 0x24))
+#define UART0_CR   *((volatile unsigned *)(UARTBASE + 0x30))
+#define UART0_FR   *((volatile unsigned *)(UARTBASE + 0x18))
+#define UART0_IIR  *((volatile unsigned *)(UARTBASE + 0x1C))
+#define UART0_ICR  *((volatile unsigned *)(UARTBASE + 0x44))
+
+/*****************************************************************************/
+/* Received Status Register - RSR                                            */
+/*****************************************************************************/
+#define RSR_OVERRUN_ERROR   0x08
+#define RSR_BREAK_ERROR     0x04
+#define RSR_PARITY_ERROR    0x02
+#define RSR_FRAMING_ERROR   0x01
+
+/*****************************************************************************/
+/* Line Control High Byte Register - LCRH                                    */
+/*****************************************************************************/
+#define LCRH_WORD_LENGTH_8  0x60
+#define LCRH_WORD_LENGTH_7  0x40
+#define LCRH_WORD_LENGTH_6  0x20
+#define LCRH_WORD_LENGTH_5  0x00
+#define LCRH_FIFO_ENABLED   0x10
+#define LCRH_2_STOP_BITS    0x08
+#define LCRH_EVEN_PARITY    0x04
+#define LCRH_PARITY_ENABLE  0x02
+#define LCRH_SEND_BREAK     0x01
+
+/*****************************************************************************/
+/* Line Control Medium Byte Register - LCRM                                  */
+/* This register specifies the high byte of the Baud rate divisor            */
+/*****************************************************************************/
+#define LCRM_BAUD_460800  0x00
+#define LCRM_BAUD_230400  0x00
+#define LCRM_BAUD_115200  0x00
+#define LCRM_BAUD_76800   0x00
+#define LCRM_BAUD_57600   0x00
+#define LCRM_BAUD_38400   0x00
+#define LCRM_BAUD_19200   0x00
+#define LCRM_BAUD_14400   0x00
+#define LCRM_BAUD_9600    0x00
+#define LCRM_BAUD_2400    0x01
+#define LCRM_BAUD_1200    0x02
+
+/*****************************************************************************/
+/* Line Control Low Byte Register - LCRL                                     */
+/* This register specifies the low byte of the Baud rate divisor             */
+/*****************************************************************************/
+#define LCRL_BAUD_460800  0x01
+#define LCRL_BAUD_230400  0x03
+#define LCRL_BAUD_115200  0x07
+#define LCRL_BAUD_76800   0x0B
+#define LCRL_BAUD_57600   0x0F
+#define LCRL_BAUD_38400   0xC
+#define LCRL_BAUD_19200   0x2F
+#define LCRL_BAUD_14400   0x3F
+#define LCRL_BAUD_9600    0x5F
+#define LCRL_BAUD_2400    0x7F
+#define LCRL_BAUD_1200    0xFF
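+
+/* Note (illustrative): these values are consistent with the divisor formula
+ * Fuartclk / (16 * baud_rate) - 1, assuming a 14.7456 MHz reference clock;
+ * e.g. 14745600 / (16 * 115200) - 1 = 7 = LCRL_BAUD_115200. The actual clock
+ * is platform dependent. */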
+
+/*****************************************************************************/
+/* Control Register - CR                                                     */
+/*****************************************************************************/
+#define CR_LOOP_BACK_EN   0x80
+#define CR_TIMEOUT_INT_EN 0x40
+#define CR_TX_INT_ENABLE  0x100
+#define CR_RX_INT_ENABLE  0x200
+#define CR_MODSTAT_INT_EN 0x08
+#define CR_UART_ENABLE    0x01
+
+/*****************************************************************************/
+/* Flag Register - FR                                                        */
+/*****************************************************************************/
+#define FR_TX_FIFO_EMPTY  0x80
+#define FR_RX_FIFO_FULL   0x40
+#define FR_TX_FIFO_FULL   0x20
+#define FR_RX_FIFO_EMPTY  0x10
+#define FR_BUSY           0x08
+#define FR_CARRIER_DETECT 0x04
+#define FR_SET_READY      0x02
+#define FR_CLEAR_TO_SEND  0x01
+
+/*****************************************************************************/
+/* Interrupt Identification Register - IIR                                   */
+/*****************************************************************************/
+#define IIR_RX_TIME_OUT   0x08
+#define IIR_TX            0x04
+#define IIR_RX            0x02
+#define IIR_MODEM         0x01
+
+void UartStdOutInit(void)
+{
+    /* Disable the serial port while setting the baud rate and word length. */
+    UART0_CR = 0;
+
+    /* Clear the receive status register. */
+    UART0_ECR = 0;
+
+    /* Set the correct baud rate and word length. */
+    UART0_LCRL = LCRL_BAUD_115200;
+    UART0_LCRM = LCRM_BAUD_115200;
+    UART0_LCRH = LCRH_WORD_LENGTH_8;
+
+    /* Explicitly disable FIFOs for char mode. */
+    UART0_LCRH &= ~LCRH_FIFO_ENABLED;
+
+    /* Enable UART0 (and RX/TX) without interrupts. */
+    UART0_CR = CR_UART_ENABLE | CR_TX_INT_ENABLE | CR_RX_INT_ENABLE;
+}
+
+unsigned char UartPutc(unsigned char ch)
+{
+    if (ch == '\n') {
+        (void) UartPutc('\r');
+    }
+    while (UART0_FR & FR_TX_FIFO_FULL)
+        ;
+    UART0_DR = ch;
+
+    return ch;
+}
+
+unsigned char UartGetc(void)
+{
+    unsigned char c;
+    while (UART0_FR & FR_RX_FIFO_EMPTY)
+        ;
+    c = UART0_DR;
+    if (c == '\r') {
+        c = '\n';
+    }
+
+    return c;
+}
+
+bool GetLine (char *lp, unsigned int len)
+{
+    unsigned int cnt = 0;
+    char c;
+
+    do {
+        c = UartGetc();
+        switch (c) {
+            case CNTLQ:                       /* ignore Control S/Q.            */
+            case CNTLS:
+                break;
+            case BACKSPACE:
+            case DEL:
+                if (cnt == 0) {
+                    break;
+                }
+                cnt--;                         /* decrement count.               */
+                lp--;                          /* and line pointer.              */
+                UartPutc (0x08);               /* echo backspace.                */
+                UartPutc (' ');
+                UartPutc (0x08);
+                fflush (stdout);
+                break;
+            case ESC:
+            case 0:
+                *lp = 0;                       /* ESC - stop editing line.       */
+                return false;
+            case CR:                           /* CR - done, stop editing line.  */
+                *lp = c;
+                lp++;                          /* increment line pointer.        */
+                cnt++;                         /* and count.                     */
+                c = LF;
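+                /* Fall through: echo and store the character. */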
+            default:
+                UartPutc (*lp = c);            /* echo and store character.      */
+                fflush (stdout);
+                lp++;                          /* increment line pointer.        */
+                cnt++;                         /* and count.                     */
+                break;
+        }
+    } while (cnt < len - 2  &&  c != LF);      /* check limit and CR.            */
+    *lp = 0;                                   /* mark end of string.            */
+    return true;
+}
+
+__attribute__((noreturn)) void UartEndSimulation(int code)
+{
+    UartPutc((char) 0x4);  /* End of simulation */
+    UartPutc((char) code); /* Exit code */
+    while(1);
+}
diff --git a/source/application/hal/platforms/bare-metal/bsp/cmsis-device/cmsis.c b/source/application/hal/platforms/bare-metal/bsp/cmsis-device/cmsis.c
new file mode 100644
index 0000000..c9cf53d
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/cmsis-device/cmsis.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "cmsis.h"
+
+extern void *__Vectors;                   /* see irqs.c */
+
+/*----------------------------------------------------------------------------*\
+ *                        Define clocks (uses OSC1 ACLK)                      *
+\*----------------------------------------------------------------------------*/
+#define __XTAL            (25000000)      /* Oscillator frequency             */
+#define __SYSTEM_CLOCK    (__XTAL)
+
+#define STR(x) #x
+#define RESET_REG(n) __ASM volatile("MOV " STR(r##n) ", #0" : : : STR(r##n))
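+/* For example, RESET_REG(3) expands (after string concatenation) to
+ * __ASM volatile("MOV r3, #0" : : : "r3"). */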
+
+#if defined(CPU_CORTEX_M55)
+#define CCR_DL   (1 << 19)
+#else
+#error  "Invalid CPU; This file only services Cortex-M55 CPUs"
+#endif /* (CPU_CORTEX_M55) */
+
+/*----------------------------------------------------------------------------
+  System Core Clock Variable (Core Clock)
+ *----------------------------------------------------------------------------*/
+uint32_t SystemCoreClock = __SYSTEM_CLOCK;
+
+
+/*----------------------------------------------------------------------------
+  Clock functions
+ *----------------------------------------------------------------------------*/
+/**
+ * @brief  Updates the SystemCoreClock variable with current core Clock
+ *         retrieved from cpu registers.
+ */
+void SystemCoreClockUpdate(void)
+{
+    /* Update the SystemCoreClock variable */
+    SystemCoreClock = __SYSTEM_CLOCK;
+}
+
+uint32_t GetSystemCoreClock(void)
+{
+    return SystemCoreClock;
+}
+
+/**
+ * @brief  Setup the microcontroller system.
+ *         Initialize the System.
+ **/
+void SystemInit(void)
+{
+#if (defined (__FPU_USED) && (__FPU_USED == 1U)) || \
+    (defined (__MVE_USED) && (__MVE_USED == 1U))
+    SCB->CPACR |= ((3U << 10U*2U) |   /* Enable CP10 full access. */
+                   (3U << 11U*2U));   /* Enable CP11 full access. */
+#endif
+
+    /* Initialise registers r0-r12 and LR(=r14)
+     * They must have a valid value before being potentially pushed to stack by
+     * C calling convention or by context saving in exception handling
+     */
+    RESET_REG(0);
+    RESET_REG(1);
+    RESET_REG(2);
+    RESET_REG(3);
+    RESET_REG(4);
+    RESET_REG(5);
+    RESET_REG(6);
+    RESET_REG(7);
+    RESET_REG(8);
+    RESET_REG(9);
+    RESET_REG(10);
+    RESET_REG(11);
+    RESET_REG(12);
+    RESET_REG(14);
+
+#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U)
+    SCB->VTOR = (uint32_t) &__Vectors;
+#endif
+
+    /* Enable hard, bus, mem and usage fault detection in SHCSR, bits 16-18.
+     * Enable stkofhfnmign, bfhfnmign, div_0_trp and usersetmpend bits in CCR;
+     * unalign_trp is only enabled below if UNALIGNED_SUPPORT_DISABLE is set.
+     */
+    SCB->SHCSR = (
+        _VAL2FLD(SCB_SHCSR_USGFAULTENA, 1) |
+        _VAL2FLD(SCB_SHCSR_BUSFAULTENA, 1) |
+        _VAL2FLD(SCB_SHCSR_MEMFAULTENA, 1));
+
+    SCB->CCR = (_VAL2FLD(SCB_CCR_USERSETMPEND, 1) |
+                _VAL2FLD(SCB_CCR_DIV_0_TRP, 1)    |
+                _VAL2FLD(SCB_CCR_BFHFNMIGN, 1)    |
+                _VAL2FLD(SCB_CCR_STKOFHFNMIGN, 1));
+#ifdef UNALIGNED_SUPPORT_DISABLE
+    SCB->CCR |= _VAL2FLD(SCB_CCR_UNALIGN_TRP, 1);
+#endif
+
+    SCB->CCR |= CCR_DL;
+
+    /* Reset pipeline. */
+    __DSB();
+    __ISB();
+
+#ifdef UNALIGNED_SUPPORT_DISABLE
+    SCB->CCR |= SCB_CCR_UNALIGN_TRP_Msk;
+#endif
+
+    SystemCoreClock = __SYSTEM_CLOCK;
+}
diff --git a/source/application/hal/platforms/bare-metal/bsp/cmsis-device/include/cmsis.h b/source/application/hal/platforms/bare-metal/bsp/cmsis-device/include/cmsis.h
new file mode 100644
index 0000000..969db15
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/cmsis-device/include/cmsis.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef BAREMETAL_CMSIS_H
+#define BAREMETAL_CMSIS_H
+
+#include "ARMCM55.h"  /* Cortex M system header file from CMSIS. */
+#include "irqs.h"     /* Interrupt definitions file. */
+
+/* Additions to the template functions should be declared here. */
+
+/**
+ * @brief   Gets the internal processor clock.
+ * @return  Clock frequency as unsigned 32 bit value.
+ **/
+uint32_t GetSystemCoreClock(void);
+
+#endif  /* BAREMETAL_CMSIS_H */
diff --git a/source/application/hal/platforms/bare-metal/bsp/cmsis-device/include/irqs.h b/source/application/hal/platforms/bare-metal/bsp/cmsis-device/include/irqs.h
new file mode 100644
index 0000000..0d8dec6
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/cmsis-device/include/irqs.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef IRQS_H
+#define IRQS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "peripheral_irqs.h"
+
+#include <stdint.h>
+
+/* Interrupt handler function type. */
+typedef void (*const irq_vec_type)(void);
+
+/**
+ *  @brief  Reset interrupt handler; also the starting point
+ *          of the application.
+ **/
+extern void Reset_Handler(void);
+
+/**
+ * @brief   Gets the system tick triggered cycle counter for the CPU.
+ * @return  64-bit counter value.
+ **/
+extern uint64_t Get_SysTick_Cycle_Count(void);
+
+/**
+ * @brief   Initialises the system tick registers.
+ * @return  Error code returned by the SysTick configuration function
+ *          (0 = no error).
+ **/
+extern int Init_SysTick(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* IRQS_H */
diff --git a/source/application/hal/platforms/bare-metal/bsp/cmsis-device/irqs.c b/source/application/hal/platforms/bare-metal/bsp/cmsis-device/irqs.c
new file mode 100644
index 0000000..c6f54b1
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/cmsis-device/irqs.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "irqs.h"
+#include "cmsis.h"
+
+#include <stdio.h>
+
+static uint64_t cpu_cycle_count = 0;
+
+/**
+ * @brief   Dump core registers on stdout
+ */
+static void LogCoreCPURegisters(void)
+{
+    printf("CTRL    : 0x%08x\n", __get_CONTROL());
+    printf("IPSR    : 0x%08x\n", __get_IPSR());
+    printf("APSR    : 0x%08x\n", __get_APSR());
+    printf("xPSR    : 0x%08x\n", __get_xPSR());
+    printf("PSP     : 0x%08x\n", __get_PSP());
+    printf("MSP     : 0x%08x\n", __get_MSP());
+    printf("PRIMASK : 0x%08x\n", __get_PRIMASK());
+    printf("BASEPRI : 0x%08x\n", __get_BASEPRI());
+    printf("FAULTMSK: 0x%08x\n", __get_FAULTMASK());
+    printf("PC      : 0x%08x\n", __current_pc());
+}
+
+/**
+ * @brief   Default interrupt handler - an infinite loop.
+ **/
+__attribute__((noreturn)) static void DefaultHandler(void)
+{
+    LogCoreCPURegisters();
+    while (1) {
+        /* Without the following line, armclang may optimize away the
+         * infinite loop because a loop without side effects is
+         * undefined behaviour. */
+        __ASM volatile("");
+    }
+}
+
+#define DEFAULT_HANDLER_CALL(type)              \
+    do {                                        \
+        printf("\n%s caught by function %s\n",  \
+             type, __FUNCTION__);               \
+        DefaultHandler();                       \
+    } while (0)
+
+#define DEFAULT_ERROR_HANDLER_CALL()            \
+            DEFAULT_HANDLER_CALL("Exception")
+
+#define DEFAULT_IRQ_HANDLER_CALL()              \
+            DEFAULT_HANDLER_CALL("Interrupt")
+
+/**
+ * Dummy Exception Handlers for core interrupts.
+ *
+ * Weak definitions provided to be used if the user chooses not
+ * to override them.
+ **/
+
+/**
+ * @brief  Non maskable interrupt handler.
+ **/
+ __attribute__((weak)) void NMI_Handler(void)
+{
+    DEFAULT_ERROR_HANDLER_CALL();
+}
+
+/**
+ * @brief  Hardfault interrupt handler.
+ **/
+ __attribute__((weak)) void HardFault_Handler(void)
+{
+    DEFAULT_ERROR_HANDLER_CALL();
+}
+
+/**
+ * @brief  Memory management interrupt handler.
+ **/
+__attribute__((weak)) void MemManage_Handler(void)
+{
+    DEFAULT_IRQ_HANDLER_CALL();
+}
+
+/**
+ * @brief  Bus fault interrupt handler.
+ **/
+__attribute__((weak)) void BusFault_Handler(void)
+{
+    DEFAULT_ERROR_HANDLER_CALL();
+}
+
+/**
+ * @brief  Usage fault interrupt handler.
+ **/
+__attribute__((weak)) void UsageFault_Handler(void)
+{
+    DEFAULT_ERROR_HANDLER_CALL();
+}
+
+/**
+ * @brief  Secure access fault interrupt handler.
+ **/
+__attribute__((weak)) void SecureFault_Handler(void)
+{
+    DEFAULT_ERROR_HANDLER_CALL();
+}
+
+/**
+ * @brief  Supervisor call interrupt handler.
+ **/
+__attribute__((weak)) void SVC_Handler(void)
+{
+    DEFAULT_IRQ_HANDLER_CALL();
+}
+
+/**
+ * @brief  Debug monitor interrupt handler.
+ **/
+__attribute__((weak)) void DebugMon_Handler(void)
+{
+    DEFAULT_IRQ_HANDLER_CALL();
+}
+
+/**
+ * @brief  Pending SV call interrupt handler.
+ */
+__attribute__((weak)) void PendSV_Handler(void)
+{
+    DEFAULT_IRQ_HANDLER_CALL();
+}
+
+/**
+ * @brief   System tick interrupt handler.
+ **/
+void SysTick_Handler(void)
+{
+    /* Increment the cycle counter based on load value. */
+    cpu_cycle_count += SysTick->LOAD + 1;
+}
+
+uint64_t Get_SysTick_Cycle_Count(void)
+{
+    uint32_t systick_val;
+
+    NVIC_DisableIRQ(SysTick_IRQn);
+    systick_val = SysTick->VAL & SysTick_VAL_CURRENT_Msk;
+    NVIC_EnableIRQ(SysTick_IRQn);
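+
+    /* SysTick is a down-counter reloaded from LOAD, so the cycles elapsed in
+     * the current period are (LOAD - systick_val); add them to the running
+     * total accumulated by SysTick_Handler(). */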
+
+    return cpu_cycle_count + (SysTick->LOAD - systick_val);
+}
+
+
+/**
+ * These symbols are provided by the Arm C library and require the stack and
+ * heap regions to be defined in the scatter file.
+ */
+extern void Image$$ARM_LIB_STACK$$ZI$$Base();
+extern void Image$$ARM_LIB_STACK$$ZI$$Limit();
+extern void Image$$ARM_LIB_HEAP$$ZI$$Base();
+extern void Image$$ARM_LIB_HEAP$$ZI$$Limit();
+extern __attribute__((noreturn)) void __main();
+
+__attribute__((naked, used)) void __user_setup_stackheap()
+{
+    __ASM volatile("LDR  r0, =Image$$ARM_LIB_HEAP$$ZI$$Base");
+    __ASM volatile("LDR  r1, =Image$$ARM_LIB_STACK$$ZI$$Limit");
+    __ASM volatile("LDR  r2, =Image$$ARM_LIB_HEAP$$ZI$$Limit");
+    __ASM volatile("LDR  r3, =Image$$ARM_LIB_STACK$$ZI$$Base");
+    __ASM volatile("bx   lr");
+}
+
+/**
+ * Interrupt vector table.
+ */
+irq_vec_type __Vectors[] __attribute__((section("RESET"), used)) = {
+    &Image$$ARM_LIB_STACK$$ZI$$Limit,  /* 0 Initial SP */
+    &Reset_Handler      , /* 1 Initial PC, set to entry point */
+
+    &NMI_Handler        , /* 2 (-14) NMI Handler            */
+    &HardFault_Handler  , /* 3 (-13) Hard Fault Handler     */
+    &MemManage_Handler  , /* 4 (-12) MPU Fault Handler      */
+    &BusFault_Handler   , /* 5 (-11) Bus Fault Handler      */
+    &UsageFault_Handler , /* 6 (-10) Usage Fault Handler    */
+    &SecureFault_Handler, /* 7 ( -9) Secure Fault Handler   */
+    0                   , /* 8 ( -8) Reserved               */
+    0                   , /* 9 ( -7) Reserved               */
+    0                   , /* 10 ( -6) Reserved              */
+    &SVC_Handler        , /* 11 ( -5) SVCall Handler        */
+    &DebugMon_Handler   , /* 12 ( -4) Debug Monitor Handler */
+    0                   , /* 13 ( -3) Reserved              */
+    &PendSV_Handler     , /* 14 ( -2) PendSV Handler        */
+    &SysTick_Handler    , /* 15 ( -1) SysTick Handler       */
+
+    /* External sources to be populated by user. */
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*   0 -  16 */
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*  16 -  32 */
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*  32 -  48 */
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*  48 -  64 */
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*  64 -  80 */
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*  80 -  96 */
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*  96 -  112 */
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 112 -  128 */
+};
+
+int Init_SysTick(void)
+{
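+    /* Illustrative: with the default 25 MHz core clock this is 250001 ticks,
+     * i.e. a SysTick period of roughly 10 ms. */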
+    const uint32_t ticks_10ms = GetSystemCoreClock()/100 + 1;
+    int err = 0;
+
+    /* Reset CPU cycle count value. */
+    cpu_cycle_count = 0;
+
+    /* Guard the SysTick reconfiguration from being interrupted. */
+    NVIC_DisableIRQ(SysTick_IRQn);
+
+    /* SysTick init - this will enable interrupt too. */
+    err = SysTick_Config(ticks_10ms);
+
+    /* Enable interrupt again. */
+    NVIC_EnableIRQ(SysTick_IRQn);
+
+    return err;
+}
+
+/* Reset handler - starting point of our application. */
+__attribute__((used)) void Reset_Handler(void)
+{
+    /* Initialise system. */
+    SystemInit();
+
+    /* Configure the system tick. */
+    Init_SysTick();
+
+    /* C library supplied entry point. */
+    __main();
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/source/application/hal/platforms/bare-metal/bsp/include/bsp.h b/source/application/hal/platforms/bare-metal/bsp/include/bsp.h
new file mode 100644
index 0000000..fbe1ff6
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/include/bsp.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef BSP_H
+#define BSP_H
+
+/* Core modules - these are common */
+#include "bsp_core_log.h"   /* Logging related helpers. */
+#include "uart_stdout.h"    /* stdout over UART. */
+
+#if defined(MPS3_PLATFORM) /* If running on MPS3 platform. */
+
+#include "smm_mps3.h"       /* Mem map for MPS3 peripherals. */
+#include "glcd_mps3.h"      /* LCD functions. */
+#include "timer_mps3.h"     /* Timer functions. */
+#include "device_mps3.h"    /* FPGA level definitions and functions. */
+
+#else /* MPS3_PLATFORM */
+
+#include "stubs_fvp.h"      /* Stubs for FVP. */
+#include "timer_fvp.h"      /* Timer API for FVP. */
+
+#endif /* MPS3_PLATFORM */
+
+#endif /* BSP_H */
diff --git a/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-200.sct b/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-200.sct
new file mode 100644
index 0000000..293193e
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-200.sct
@@ -0,0 +1,102 @@
+;  Copyright (c) 2021 Arm Limited. All rights reserved.
+;  SPDX-License-Identifier: Apache-2.0
+;
+;  Licensed under the Apache License, Version 2.0 (the "License");
+;  you may not use this file except in compliance with the License.
+;  You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+;  Unless required by applicable law or agreed to in writing, software
+;  distributed under the License is distributed on an "AS IS" BASIS,
+;  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;  See the License for the specific language governing permissions and
+;  limitations under the License.
+
+; *************************************************************
+; ***       Scatter-Loading Description File                ***
+; *************************************************************
+;
+; Sections used:
+;---------------------------------------------------------
+; |    Start    |      End    |    Size     |   Remarks  |
+;-|-------------|-------------|-------------|------------|
+; | 0x0000_0000 | 0x0010_0000 | 0x0010_0000 | ITCM (RO)  |
+; | 0x0010_0000 | 0x0030_0000 | 0x0020_0000 | BRAM (RW)  |
+; | 0x2000_0000 | 0x2040_0000 | 0x0040_0000 | DTCM (RW)  |
+; | 0x6000_0000 | 0x6200_0000 | 0x0200_0000 | DRAM (RW)  |
+;-|-------------|-------------|-------------|------------|
+; ITCM is aliased at 0x1000_0000 (single bank)
+; BRAM is aliased at 0x1010_0000
+; DTCM is aliased at 0x3000_0000 (four banks of 1MiB each)
+; DRAM is aliased at 0x7000_0000 (section is 256MiB)
+;
+; Note: Ethos-U55 can only access DRAM and BRAM sections
+;---------------------------------------------------------
+; First load region
+;---------------------------------------------------------
+LOAD_REGION_0       0x00000000                  0x00100000
+{
+    ;-----------------------------------------------------
+    ; First part of code mem - 1MiB
+    ;-----------------------------------------------------
+    itcm.bin        0x00000000                  0x00100000
+    {
+        *.o (RESET, +First)
+        * (InRoot$$Sections)
+        .ANY (+RO)
+    }
+
+    ;-----------------------------------------------------
+    ; Code memory's 2MiB - reserved for activation buffers
+    ; Make sure this is uninitialised.
+    ;-----------------------------------------------------
+    bram.bin        0x00100000  UNINIT          0x00200000
+    {
+        ; activation buffers a.k.a tensor arena
+        *.o (.bss.NoInit.activation_buf)
+    }
+
+    ;-----------------------------------------------------
+    ; 1MiB bank is used for any other RW or ZI data
+    ; Note: this region is internal to the Cortex-M CPU
+    ;-----------------------------------------------------
+    dtcm.bin        0x20000000                  0x00100000
+    {
+        .ANY(+RW +ZI)
+    }
+
+    ;-----------------------------------------------------
+    ; 128kiB of stack space within SRAM region
+    ;-----------------------------------------------------
+    ARM_LIB_STACK   0x20100000 EMPTY ALIGN 8    0x00020000
+    {}
+
+    ;-----------------------------------------------------
+    ; 2MiB of heap space within the DTCM region
+    ;-----------------------------------------------------
+    ARM_LIB_HEAP    0x20200000 EMPTY ALIGN 8    0x00200000
+    {}
+}
+
+;---------------------------------------------------------
+; Second load region
+;---------------------------------------------------------
+LOAD_REGION_1       0x60000000                  0x02000000
+{
+    ;-----------------------------------------------------
+    ; 32 MiB of DRAM space for nn model and input vectors
+    ;-----------------------------------------------------
+    dram.bin        0x60000000                  0x02000000
+    {
+        ; nn model's baked in input matrices
+        *.o (ifm)
+
+        ; nn model
+        *.o (nn_model)
+
+        ; if the activation buffer (tensor arena) doesn't
+        ; fit in the SRAM region, we accommodate it here
+        *.o (activation_buf)
+    }
+}
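The input section names referenced above (ifm, nn_model, activation_buf and .bss.NoInit.activation_buf) have to be emitted by the application objects for the scatter description to place them. A minimal sketch of how this is typically done from C; the symbol names and sizes below are placeholders, not part of this patch:

    /* Illustrative placement only: names and sizes are hypothetical. */
    #include <stdint.h>

    #define ACTIVATION_BUF_SZ   (0x00200000)    /* Example size; the real value is model dependent. */

    /* Tensor arena, placed in the uninitialised bram.bin region (not zeroed at startup). */
    static uint8_t tensor_arena[ACTIVATION_BUF_SZ]
        __attribute__((aligned(16), section(".bss.NoInit.activation_buf")));

    /* Neural network model data, placed in the DRAM load region. */
    static const uint8_t nn_model_data[]
        __attribute__((aligned(16), section("nn_model"))) = {
        0x00, 0x01, 0x02, 0x03  /* ... model bytes ... */
    };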
diff --git a/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.sct b/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.sct
new file mode 100644
index 0000000..327d511
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.sct
@@ -0,0 +1,118 @@
+;  Copyright (c) 2021 Arm Limited. All rights reserved.
+;  SPDX-License-Identifier: Apache-2.0
+;
+;  Licensed under the Apache License, Version 2.0 (the "License");
+;  you may not use this file except in compliance with the License.
+;  You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+;  Unless required by applicable law or agreed to in writing, software
+;  distributed under the License is distributed on an "AS IS" BASIS,
+;  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;  See the License for the specific language governing permissions and
+;  limitations under the License.
+
+; *************************************************************
+; ***       Scatter-Loading Description File                ***
+; *************************************************************
+; Please see docs/sections/appendix.md for memory mapping information.
+;
+; Note: Ethos-U55 can access BRAM, internal SRAM and the DDR sections => activation buffers and
+;       the model should only be placed in those regions.
+;
+;---------------------------------------------------------
+; First load region (ITCM)
+;---------------------------------------------------------
+LOAD_REGION_0       0x00000000                  0x00080000
+{
+    ;-----------------------------------------------------
+    ; First part of code mem - 512kiB
+    ;-----------------------------------------------------
+    itcm.bin        0x00000000                  0x00080000
+    {
+        *.o (RESET, +First)
+        * (InRoot$$Sections)
+
+        ; Essentially only RO-CODE, RO-DATA is in a
+        ; different region.
+        .ANY (+RO)
+    }
+
+    ;-----------------------------------------------------
+    ; 128kiB of 512kiB DTCM is used for any other RW or ZI
+    ; data. Note: this region is internal to the Cortex-M
+    ; CPU.
+    ;-----------------------------------------------------
+    dtcm.bin        0x20000000                  0x00020000
+    {
+        ; Any R/W and/or zero initialised data
+        .ANY(+RW +ZI)
+    }
+
+    ;-----------------------------------------------------
+    ; 384kiB of stack space within the DTCM region. See
+    ; `dtcm.bin` for the first section. Note: by virtue of
+    ; being part of DTCM, this region is only accessible
+    ; from Cortex-M55.
+    ;-----------------------------------------------------
+    ARM_LIB_STACK   0x20020000 EMPTY ALIGN 8    0x00060000
+    {}
+
+    ;-----------------------------------------------------
+    ; SSE-300's internal SRAM of 4MiB - reserved for
+    ; activation buffers.
+    ; This region should have 3 cycle read latency from
+    ; both Cortex-M55 and Ethos-U55
+    ;-----------------------------------------------------
+    isram.bin       0x31000000  UNINIT ALIGN 16 0x00400000
+    {
+        ; activation buffers a.k.a tensor arena
+        *.o (.bss.NoInit.activation_buf)
+    }
+}
+
+;---------------------------------------------------------
+; Second load region (DDR)
+;---------------------------------------------------------
+LOAD_REGION_1       0x70000000                  0x02000000
+{
+    ;-----------------------------------------------------
+    ; 32 MiB of DRAM space for neural network model,
+    ; input vectors and labels. If the activation buffer
+    ; size required by the network is bigger than the
+    ; SRAM size available, it is accommodated here.
+    ;-----------------------------------------------------
+    dram.bin        0x70000000 ALIGN 16         0x02000000
+    {
+        ; nn model's baked in input matrices
+        *.o (ifm)
+
+        ; nn model
+        *.o (nn_model)
+
+        ; labels
+        *.o (labels)
+
+        ; if the activation buffer (tensor arena) doesn't
+        ; fit in the SRAM region, we accommodate it here
+        *.o (activation_buf)
+    }
+
+    ;-----------------------------------------------------
+    ; First 256kiB of BRAM (FPGA SRAM) used for RO data.
+    ; Note: Total BRAM size available is 2MiB.
+    ;-----------------------------------------------------
+    bram.bin        0x11000000          ALIGN 8 0x00040000
+    {
+        ; RO data (incl. unwinding tables for debugging)
+        .ANY (+RO-DATA)
+    }
+
+    ;-----------------------------------------------------
+    ; Remaining part of the 2MiB BRAM used as heap space.
+    ; 0x00200000 - 0x00040000 = 0x001C0000 (1.75 MiB)
+    ;-----------------------------------------------------
+    ARM_LIB_HEAP    0x11040000 EMPTY ALIGN 8    0x001C0000
+    {}
+}
diff --git a/source/application/hal/platforms/bare-metal/bsp/mem_layout/simple_platform.sct b/source/application/hal/platforms/bare-metal/bsp/mem_layout/simple_platform.sct
new file mode 100644
index 0000000..a1ffb49
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/bsp/mem_layout/simple_platform.sct
@@ -0,0 +1,102 @@
+;  Copyright (c) 2021 Arm Limited. All rights reserved.
+;  SPDX-License-Identifier: Apache-2.0
+;
+;  Licensed under the Apache License, Version 2.0 (the "License");
+;  you may not use this file except in compliance with the License.
+;  You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+;  Unless required by applicable law or agreed to in writing, software
+;  distributed under the License is distributed on an "AS IS" BASIS,
+;  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;  See the License for the specific language governing permissions and
+;  limitations under the License.
+
+; *************************************************************
+; ***       Scatter-Loading Description File                ***
+; *************************************************************
+;
+;---------------------------------------------------------
+; First load region (ITCM)
+;---------------------------------------------------------
+LOAD_REGION_0       0x00000000                  0x00080000
+{
+    ;-----------------------------------------------------
+    ; First part of code mem - 512kiB
+    ;-----------------------------------------------------
+    itcm.bin        0x00000000                  0x00080000
+    {
+        *.o (RESET, +First)
+        * (InRoot$$Sections)
+
+        ; Essentially only RO-CODE, RO-DATA is in a
+        ; different region.
+        .ANY (+RO)
+    }
+
+    ;-----------------------------------------------------
+    ; 2MiB of BRAM (FPGA data SRAM) region
+    ;-----------------------------------------------------
+    bram.bin        0x11000000  UNINIT ALIGN 16 0x00200000
+    {
+        ; activation buffers a.k.a tensor arena
+        *.o (.bss.NoInit.activation_buf)
+    }
+
+    ;-----------------------------------------------------
+    ; 128kiB of the 512kiB DTCM bank is used for any other
+    ; RW or ZI data. Note: this region is internal to the
+    ; Cortex-M CPU.
+    ;-----------------------------------------------------
+    dtcm.bin        0x20000000                  0x00020000
+    {
+        .ANY(+RW +ZI)
+    }
+
+    ;-----------------------------------------------------
+    ; 128kiB of stack space within the DTCM region
+    ;-----------------------------------------------------
+    ARM_LIB_STACK   0x20020000 EMPTY ALIGN 8    0x00020000
+    {}
+
+    ;-----------------------------------------------------
+    ; 256kiB of heap space within the DTCM region
+    ;-----------------------------------------------------
+    ARM_LIB_HEAP    0x20040000 EMPTY ALIGN 8    0x00040000
+    {}
+}
+
+;---------------------------------------------------------
+; Second load region (DDR)
+;---------------------------------------------------------
+LOAD_REGION_1       0x70000000                  0x02000000
+{
+    ;-----------------------------------------------------
+    ; 32 MiB of DRAM space for nn model and input vectors
+    ;-----------------------------------------------------
+    dram.bin        0x70000000 ALIGN 16         0x02000000
+    {
+        ; nn model's baked in input matrices
+        *.o (ifm)
+
+        ; nn model
+        *.o (nn_model)
+
+        ; if the activation buffer (tensor arena) doesn't
+        ; fit in the SRAM region, we accommodate it here
+        *.o (activation_buf)
+    }
+
+    ;-----------------------------------------------------
+    ; 512kiB of the SSE-300's internal SRAM is used here
+    ; for read-only data.
+    ; This region should have 3 cycle read latency from
+    ; both Cortex-M55 and Ethos-U55
+    ;-----------------------------------------------------
+    isram.bin       0x31000000                  0x00080000
+    {
+        ; RO data (incl. unwinding tables for debugging)
+        .ANY (+RO-DATA)
+    }
+}
diff --git a/source/application/hal/platforms/bare-metal/data_acquisition/data_acq.c b/source/application/hal/platforms/bare-metal/data_acquisition/data_acq.c
new file mode 100644
index 0000000..1e40b02
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/data_acquisition/data_acq.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "data_acq.h"
+
+#include "bsp.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+/**
+ * @brief       Get the user input from USART.
+ * @param[out]  user_input  String read from the UART block.
+ * @param[in]   size        String read length.
+ * @return      0 if successful, error code otherwise.
+ **/
+static int get_uart_user_input(char* user_input, int size)
+{
+    if (true != GetLine(user_input, size - 1)) {
+        printf_err("invalid input\n");
+        return 1;
+    }
+    return 0;
+}
+
+int data_acq_channel_init(data_acq_module* module)
+{
+    assert(module);
+
+    /* UART should have been initialised with low level initialisation
+     * routines. */
+    module->system_init = NULL;
+
+    strncpy(module->system_name, "UART", sizeof(module->system_name));
+    module->get_input = get_uart_user_input;
+    module->inited = 1;
+
+    return !(module->inited);
+}
+
+int data_acq_channel_release(data_acq_module* module)
+{
+    assert(module);
+    module->inited = 0;
+    module->get_input = NULL;
+    return 0;
+}
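For reference, a minimal usage sketch of the acquisition interface defined above; the data_acq_module type comes from data_acq.h (not part of this hunk) and the buffer size is arbitrary:

    #include "data_acq.h"

    #include <stdio.h>

    static void read_one_line(void)
    {
        data_acq_module acq;            /* Declared in data_acq.h. */
        char user_input[64] = {0};      /* Arbitrary buffer size. */

        if (0 == data_acq_channel_init(&acq)) {
            /* Blocks until a line has been read from the UART. */
            if (0 == acq.get_input(user_input, sizeof(user_input))) {
                printf("Received: %s\n", user_input);
            }
            data_acq_channel_release(&acq);
        }
    }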
diff --git a/source/application/hal/platforms/bare-metal/data_presentation/data_psn.c b/source/application/hal/platforms/bare-metal/data_presentation/data_psn.c
new file mode 100644
index 0000000..474d552
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/data_presentation/data_psn.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "data_psn.h"
+
+#include "bsp.h"
+#include "lcd_img.h"
+
+#include <assert.h>
+#include <string.h>
+
+int data_psn_system_init(data_psn_module* module)
+{
+    assert(module);
+
+    /* LCD output supported. */
+    module->system_init = lcd_init;
+    module->present_data_image = lcd_display_image;
+    module->present_data_text = lcd_display_text;
+    module->present_box = lcd_display_box;
+    module->set_text_color = lcd_set_text_color;
+    module->clear = lcd_clear;
+    strncpy(module->system_name, "lcd", sizeof(module->system_name));
+    module->inited =  !module->system_init();
+    return !module->inited;
+}
+
+int data_psn_system_release(data_psn_module* module)
+{
+    assert(module);
+    module->inited = 0;
+    return 0;
+}
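A similar sketch for the presentation interface; data_psn_module is declared in data_psn.h (not shown here) and the colour value is a placeholder:

    #include "data_psn.h"

    #include <stdbool.h>
    #include <string.h>

    static void show_message(void)
    {
        data_psn_module psn;            /* Declared in data_psn.h. */
        const char msg[] = "Inference complete";

        if (0 == data_psn_system_init(&psn)) {
            psn.clear(0x0000);          /* Placeholder colour (black). */
            psn.present_data_text(msg, strlen(msg), 10, 40, false);
            data_psn_system_release(&psn);
        }
    }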
diff --git a/source/application/hal/platforms/bare-metal/data_presentation/lcd/include/lcd_img.h b/source/application/hal/platforms/bare-metal/data_presentation/lcd/include/lcd_img.h
new file mode 100644
index 0000000..e4ad791
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/data_presentation/lcd/include/lcd_img.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef LCD_IMG_H
+#define LCD_IMG_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+/**
+ * @brief   Initialise the LCD
+ * @return  0 if successful, error code otherwise.
+ **/
+int lcd_init(void);
+
+/**
+ * @brief       Display a given image on the LCD. This allows displaying 8 bit
+ *              single or multi-channel images on the LCD.
+ * @param[in]   data        Pointer to start of the image.
+ * @param[in]   width       Width of this image.
+ * @param[in]   height      Image height.
+ * @param[in]   channels    Number of channels.
+ * @param[in]   pos_x       Screen position x co-ordinate.
+ * @param[in]   pos_y       Screen position y co-ordinate.
+ * @param[in]   downsample_factor   Factor by which the image needs to be
+ *                                  downsampled.
+ * @return      0 if successful, non-zero otherwise.
+ **/
+int lcd_display_image(uint8_t* data, const uint32_t width,
+    const uint32_t height, const uint32_t channels,
+    const uint32_t pos_x, const uint32_t pos_y,
+    const uint32_t downsample_factor);
+
+/**
+ * @brief       Display a given string on the LCD at the specified
+ *              screen position.
+ * @param[in]   str         Pointer to a null terminated string.
+ * @param[in]   str_sz      Length of the string.
+ * @param[in]   pos_x       Screen position x co-ordinate.
+ * @param[in]   pos_y       Screen position y co-ordinate.
+ * @param[in]   allow_multiple_lines    If true, the string is spread
+ *                                      across multiple lines when it
+ *                                      does not fit in one.
+ * @return      0 if successful, non-zero otherwise.
+ **/
+int lcd_display_text(const char* str, const size_t str_sz,
+     const uint32_t pos_x, const uint32_t pos_y,
+     const bool allow_multiple_lines);
+
+/**
+ * @brief       Display a box with given color on LCD.
+ * @param[in]   pos_x       Screen position x co-ordinate.
+ * @param[in]   pos_y       Screen position y co-ordinate.
+ * @param[in]   width       Width.
+ * @param[in]   height      Height.
+ * @param[in]   color       Fill color.
+ * @return      0 if successful, non-zero otherwise.
+ **/
+int lcd_display_box(const uint32_t pos_x, const uint32_t pos_y,
+    const uint32_t width, const uint32_t height, const uint16_t color);
+
+/**
+ * @brief       Clear LCD.
+ * @param[in]   color   Fill color.
+ * @return      0 if successful, non-zero otherwise.
+ **/
+int lcd_clear(const uint16_t color);
+
+/**
+ * @brief       Set text color.
+ * @param[in]   color   Fill color.
+ * @return      0 if successful, non-zero otherwise.
+ **/
+int lcd_set_text_color(const uint16_t color);
+
+#endif /* LCD_IMG_H */
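As an illustration of the API above, a sketch that initialises the display and draws a hypothetical 8-bit, 3-channel image downsampled by a factor of 2; the buffer, dimensions and position are placeholders:

    #include "lcd_img.h"

    /* Hypothetical 192x192 RGB image shown at (10, 60), downsampled by 2. */
    static int display_example(uint8_t* image_data)
    {
        if (0 != lcd_init()) {
            return 1;
        }
        return lcd_display_image(image_data, 192, 192, 3, 10, 60, 2);
    }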
diff --git a/source/application/hal/platforms/bare-metal/data_presentation/lcd/lcd_img.c b/source/application/hal/platforms/bare-metal/data_presentation/lcd/lcd_img.c
new file mode 100644
index 0000000..75f58fd
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/data_presentation/lcd/lcd_img.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "lcd_img.h"
+
+#include "bsp.h"
+
+#include <string.h>
+#include <assert.h>
+
+static int show_title(void)
+{
+    char title[128];
+    int status = 0;
+
+    /* LCD title string */
+#if defined(CPU_CORTEX_M55)
+    const char* cpu_name = "Arm Cortex-M55";
+#else /* defined(CPU_CORTEX_M55) */
+    const char* cpu_name = "Arm CPU";
+#endif /* defined(CPU_CORTEX_M55) */
+
+    lcd_set_text_color(White);
+
+    /* First line */
+    snprintf(title, sizeof(title), "Arm ML embedded code samples");
+
+    if (0 != (status = lcd_display_text(
+            title, strlen(title), 10, 0, false))) {
+        return status;
+    }
+
+    /* Second line */
+#if defined (ARM_NPU)
+    snprintf(title, sizeof(title), "%s + Arm Ethos-U55 NPU", cpu_name);
+#else /* defined (ARM_NPU) */
+    snprintf(title, sizeof(title), "%s", cpu_name);
+#endif /* defined (ARM_NPU) */
+
+    return lcd_display_text(title, strlen(title), 10, 20, false);
+}
+
+int lcd_init(void)
+{
+    GLCD_Initialize();
+    GLCD_Clear(Black);
+    return show_title();
+}
+
+int lcd_display_image(uint8_t* data, const uint32_t width,
+    const uint32_t height, const uint32_t channels,
+    const uint32_t pos_x, const uint32_t pos_y,
+    const uint32_t downsample_factor)
+{
+    /* Sanity checks */
+    assert(data);
+    assert(downsample_factor > 0);
+    if ((pos_x + width/downsample_factor > GLCD_WIDTH) ||
+            (pos_y + height/downsample_factor > GLCD_HEIGHT)) {
+        printf_err("Invalid image size for given location!\n");
+        return 1;
+    }
+
+    if (1 == channels || 3 == channels) {
+        GLCD_Image(data, width, height, channels, pos_x, pos_y,
+            downsample_factor);
+    } else {
+        printf_err("Only single and three channel images are supported!\n");
+        return 1;
+    }
+
+    return 0;
+}
+
+int lcd_display_text(const char* str, const size_t str_sz,
+    const uint32_t pos_x, const uint32_t pos_y,
+    const bool allow_multiple_lines)
+{
+    /* We use font 0, which is 9x15 pixels. */
+    const uint32_t x_span =  9; /* Each character is  9 pixels wide. */
+    const uint32_t y_span = 15; /* Each character is 15 pixels high. */
+
+    if (str_sz == 0) {
+        return 1;
+    }
+
+    /* If not within the LCD bounds, return error. */
+    if (pos_x + x_span > GLCD_WIDTH || pos_y + y_span > GLCD_HEIGHT) {
+        return 1;
+    } else {
+        const unsigned char font_idx = 0; /* We are using the custom font = 0 */
+
+        const uint32_t col = pos_x/x_span;
+        const uint32_t max_cols = GLCD_WIDTH/x_span - 1;
+        const uint32_t max_lines = GLCD_HEIGHT/y_span - 1;
+
+        uint32_t i = 0;
+        uint32_t current_line = pos_y/y_span;
+        uint32_t current_col = col;
+
+        /* Display the string on the LCD. */
+        for (i = 0; i < str_sz; ++i) {
+
+            if (allow_multiple_lines) {
+
+                /* If the next character won't fit. */
+                if (current_col > max_cols) {
+                    current_col = col;
+
+                    /* If the next line won't fit. */
+                    if (++current_line  > max_lines) {
+                        return 1;
+                    }
+                }
+            }
+
+            GLCD_DisplayChar(current_line, current_col++, font_idx, str[i]);
+        }
+    }
+    return 0;
+}
+
+int lcd_display_box(const uint32_t pos_x, const uint32_t pos_y,
+    const uint32_t width, const uint32_t height, const uint16_t color)
+{
+    /* If not within the LCD bounds, return error. */
+    if (pos_x > GLCD_WIDTH || pos_y > GLCD_HEIGHT) {
+        return 1;
+    }
+    else {
+        GLCD_Box(pos_x, pos_y, width, height, color);
+    }
+    return 0;
+}
+
+int lcd_clear(const uint16_t color)
+{
+    GLCD_Clear(color);
+    GLCD_SetTextColor(White);
+    return show_title();
+}
+
+int lcd_set_text_color(const uint16_t color)
+{
+    GLCD_SetTextColor(color);
+    return 0;
+}
diff --git a/source/application/hal/platforms/bare-metal/timer/baremetal_timer.c b/source/application/hal/platforms/bare-metal/timer/baremetal_timer.c
new file mode 100644
index 0000000..7257c1d
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/timer/baremetal_timer.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "bsp.h"
+#include "timer.h"
+
+#include <assert.h>
+#include <string.h>
+
+#if defined (ARM_NPU)
+
+#include "pmu_ethosu.h"
+
+/**
+ * @brief Initialises the PMU and enables the cycle counter.
+ **/
+static void _init_ethosu_cyclecounter(void);
+
+/**
+ * @brief       Gets the difference of total NPU cycle counts.
+ *              (includes active and idle)
+ * @param[in]   st      Pointer to time_counter value at start time.
+ * @param[in]   end     Pointer to time_counter value at end.
+ * @return      Total NPU cycle counts difference between the arguments expressed
+ *              as unsigned 64 bit integer.
+ **/
+static uint64_t bm_get_npu_total_cycle_diff(time_counter *st,
+                                            time_counter *end);
+
+/** 
+ * @brief       Gets the difference in active NPU cycle counts.
+ * @param[in]   st      Pointer to time_counter value at start time.
+ * @param[in]   end     Pointer to time_counter value at end.
+ * @return      Active NPU cycle counts difference between the arguments expressed
+ *              as unsigned 64 bit integer.
+ **/
+static uint64_t bm_get_npu_active_cycle_diff(time_counter *st,
+                                             time_counter *end);
+
+#endif /* defined (ARM_NPU) */
+
+#if defined(MPS3_PLATFORM)
+/** 
+ * @brief       Wrapper for getting milliseconds duration between time counters
+ * @param[in]   st      Pointer to time_counter value at start time.
+ * @param[in]   end     Pointer to time_counter value at end.
+ * @return      Difference in milliseconds between given time counters.
+ **/
+static time_t bm_get_duration_ms(time_counter *st, time_counter *end);
+
+/**
+ * @brief       Wrapper for getting microseconds duration between time counters
+ * @param[in]   st      Pointer to time_counter value at start time.
+ * @param[in]   end     Pointer to time_counter value at end.
+ * @return      Difference in microseconds between given time counters.
+ **/
+static time_t bm_get_duration_us(time_counter *st, time_counter *end);
+#endif /* defined(MPS3_PLATFORM) */
+
+/**
+ * @brief Wrapper for resetting timer.
+ **/
+static void bm_timer_reset(void);
+
+/**
+ * @brief   Wrapper for getting the current timer counter.
+ * @return  Current time counter value.
+ **/
+static time_counter bm_get_time_counter(void);
+
+/**
+ * @brief   Wrapper for profiler start.
+ * @return  Current profiler start timer counter.
+ **/
+static time_counter bm_start_profiling(void);
+
+/**
+ * @brief   Wrapper for profiler end.
+ * @return  Current profiler end timer counter.
+ **/
+static time_counter bm_stop_profiling(void);
+
+/**
+ * @brief   Wrapper for getting CPU cycle difference between time counters.
+ * @return  CPU cycle difference between given time counters expressed
+ *          as unsigned 32 bit integer.
+ **/
+static uint32_t bm_get_cpu_cycles_diff(time_counter *st, time_counter *end);
+
+/**
+ * @brief       Initialiser for bare metal timer. 
+ * @param[in]   timer  Platform timer to initialize.
+ **/
+void init_timer(platform_timer *timer)
+{
+    assert(timer);
+    memset(timer, 0, sizeof(*timer));
+
+    timer->reset            = bm_timer_reset;
+    timer->get_time_counter = bm_get_time_counter;
+    timer->start_profiling  = bm_start_profiling;
+    timer->stop_profiling   = bm_stop_profiling;
+    timer->get_cpu_cycle_diff = bm_get_cpu_cycles_diff;
+    timer->cap.cpu_cycles = 1;
+
+#if defined (MPS3_PLATFORM)
+    timer->cap.duration_ms  = 1;
+    timer->cap.duration_us  = 1;
+    timer->get_duration_ms  = bm_get_duration_ms;
+    timer->get_duration_us  = bm_get_duration_us;
+#endif  /* defined (MPS3_PLATFORM) */
+
+#if defined (ARM_NPU)
+    /* We are capable of reporting npu cycle counts. */
+    timer->cap.npu_cycles   = 1;
+    timer->get_npu_total_cycle_diff = bm_get_npu_total_cycle_diff;
+    timer->get_npu_active_cycle_diff = bm_get_npu_active_cycle_diff;
+    _init_ethosu_cyclecounter();
+#endif /* defined (ARM_NPU) */
+
+    timer->reset();
+    timer->inited = 1;
+}
+
+#if defined (ARM_NPU)
+
+static void _reset_ethosu_counters(void)
+{
+    /* Reset all cycle and event counters. */
+    ETHOSU_PMU_CYCCNT_Reset();
+    ETHOSU_PMU_EVCNTR_ALL_Reset();
+}
+
+static void _init_ethosu_cyclecounter(void)
+{
+    /* Reset overflow status. */
+    ETHOSU_PMU_Set_CNTR_OVS(ETHOSU_PMU_CNT1_Msk | ETHOSU_PMU_CCNT_Msk);
+
+    /* Set the counter #0 to count idle cycles. */
+    ETHOSU_PMU_Set_EVTYPER(0, ETHOSU_PMU_NPU_IDLE);
+
+    /* Enable PMU. */
+    ETHOSU_PMU_Enable();
+
+    /* Enable the cycle counter and event counter #0. */
+    ETHOSU_PMU_CNTR_Enable(ETHOSU_PMU_CNT1_Msk | ETHOSU_PMU_CCNT_Msk);
+
+    _reset_ethosu_counters();
+}
+
+static uint64_t bm_get_npu_total_cycle_diff(time_counter *st, time_counter *end)
+{
+    return end->npu_total_ccnt - st->npu_total_ccnt;
+}
+
+static uint64_t bm_get_npu_active_cycle_diff(time_counter *st, time_counter *end)
+{
+    /* Check for overflow: The idle counter is 32 bit while the
+       total cycle count is 64 bit. */
+    const uint32_t overflow_status = ETHOSU_PMU_Get_CNTR_OVS();
+
+    if (ETHOSU_PMU_CNT1_Msk & overflow_status) {
+        printf_err("EthosU PMU idle counter overflow.\n");
+        return 0;
+    }
+
+    /* Active NPU time = total time - idle time */
+    return (bm_get_npu_total_cycle_diff(st, end) +
+           (uint64_t)(st->npu_idle_ccnt)) - (uint64_t)(end->npu_idle_ccnt);
+}
+
+#endif /* defined (ARM_NPU) */
+
+static void bm_timer_reset(void)
+{
+#if defined (ARM_NPU)
+    _init_ethosu_cyclecounter();
+#endif /* defined (ARM_NPU) */
+
+    timer_reset();
+}
+
+static time_counter bm_get_time_counter(void)
+{
+    time_counter t = {
+        .counter = get_time_counter(),
+
+#if defined (ARM_NPU)
+        .npu_idle_ccnt = ETHOSU_PMU_Get_EVCNTR(0),
+        .npu_total_ccnt = ETHOSU_PMU_Get_CCNTR()
+#endif /* defined (ARM_NPU) */
+
+    };
+
+#if defined (ARM_NPU)
+    debug("NPU total cc: %llu; NPU idle cc: %u\n",
+        t.npu_total_ccnt, t.npu_idle_ccnt);
+#endif /* defined (ARM_NPU) */
+
+    return t;
+}
+
+static time_counter bm_start_profiling(void)
+{
+    start_cycle_counter();
+    return bm_get_time_counter();
+}
+
+static time_counter bm_stop_profiling(void)
+{
+    stop_cycle_counter();
+    return bm_get_time_counter();
+}
+
+static uint32_t bm_get_cpu_cycles_diff(time_counter *st, time_counter *end)
+{
+    return get_cycle_count_diff(&(st->counter), &(end->counter));
+}
+
+#if defined(MPS3_PLATFORM)
+static time_t bm_get_duration_ms(time_counter *st, time_counter *end)
+{
+    return get_duration_milliseconds(&(st->counter), &(end->counter));
+}
+
+static time_t bm_get_duration_us(time_counter *st, time_counter *end)
+{
+    return get_duration_microseconds(&(st->counter), &(end->counter));
+}
+#endif /* defined(MPS3_PLATFORM) */
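A usage sketch of the profiling hooks registered above; platform_timer and time_counter come from timer.h/baremetal_timer.h, the workload function is hypothetical and the info() logging macro is assumed to be available via bsp.h:

    #include "bsp.h"
    #include "timer.h"

    #include <stdint.h>

    extern void run_inference(void);    /* Hypothetical workload. */

    static void profile_example(void)
    {
        platform_timer timer;
        init_timer(&timer);

        time_counter start = timer.start_profiling();
        run_inference();
        time_counter end = timer.stop_profiling();

        info("CPU cycles taken: %u\n", timer.get_cpu_cycle_diff(&start, &end));

    #if defined (ARM_NPU)
        if (timer.cap.npu_cycles) {
            info("NPU active cycles: %llu\n",
                timer.get_npu_active_cycle_diff(&start, &end));
        }
    #endif /* defined (ARM_NPU) */
    }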
diff --git a/source/application/hal/platforms/bare-metal/timer/include/baremetal_timer.h b/source/application/hal/platforms/bare-metal/timer/include/baremetal_timer.h
new file mode 100644
index 0000000..c8fc32c
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/timer/include/baremetal_timer.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef BAREMETAL_TIMER_H
+#define BAREMETAL_TIMER_H
+
+#include <stdint.h>
+#include <time.h>
+
+#if defined (MPS3_PLATFORM)
+    #include "timer_mps3.h"
+    typedef mps3_time_counter   base_time_counter;
+#else /* defined (MPS3_PLATFORM) */
+    #include "timer_fvp.h"
+    typedef fvp_time_counter    base_time_counter;
+#endif  /* defined (MPS3_PLATFORM) */
+
+typedef struct bm_time_counter {
+    base_time_counter       counter;
+
+#if defined (ARM_NPU)
+    uint64_t                npu_total_ccnt;
+    uint32_t                npu_idle_ccnt;
+#endif /* ARM_NPU */
+
+} time_counter;
+
+#endif /* BAREMETAL_TIMER_H */
diff --git a/source/application/hal/platforms/bare-metal/utils/include/system_init.h b/source/application/hal/platforms/bare-metal/utils/include/system_init.h
new file mode 100644
index 0000000..84e0305
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/utils/include/system_init.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef BAREMETAL_SYSTEM_INIT_H
+#define BAREMETAL_SYSTEM_INIT_H
+
+#include "bsp.h"
+
+/**
+ * @brief   Initialises the platform (MPS3 FPGA board or Fixed Virtual Platform)
+ *          Updates the system core clock and initialises the UART. It also
+ *          verifies that the Cortex-M CPU variant being used matches the expected
+ *          value if running on MPS3.
+ * @return  0 if successful, error code otherwise.
+*/
+int system_init(void);
+
+/**
+ * @brief  Releases the platform (MPS3 FPGA board or Fixed Virtual Platform).
+ **/
+void system_release(void);
+
+/**
+ * @brief  Returns the name of the platform (MPS3 FPGA board or Fixed Virtual Platform).
+ * @param[out]   name Platform name string.
+ * @param[in]    size Name string length.
+ **/
+void system_name(char* name, size_t size);
+
+#endif /* BAREMETAL_SYSTEM_INIT_H */
diff --git a/source/application/hal/platforms/bare-metal/utils/system_init.c b/source/application/hal/platforms/bare-metal/utils/system_init.c
new file mode 100644
index 0000000..0a6a1b3
--- /dev/null
+++ b/source/application/hal/platforms/bare-metal/utils/system_init.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "system_init.h"
+
+#include "uart_stdout.h"
+
+#include <string.h>
+
+#if defined(MPS3_PLATFORM)
+#define CREATE_MASK(msb, lsb)           (((1U << ((msb) - (lsb) + 1)) - 1) << (lsb))
+#define MASK_BITS(arg, msb, lsb)        ((arg) & CREATE_MASK(msb, lsb))
+#define EXTRACT_BITS(arg, msb, lsb)     (MASK_BITS(arg, msb, lsb) >> (lsb))
+#endif /* MPS3_PLATFORM */
+
+int system_init(void)
+{
+#if defined(MPS3_PLATFORM)
+    uint32_t id = 0;
+    uint32_t fpgaid = 0;
+    uint32_t apnote = 0;
+    uint32_t rev = 0;
+    uint32_t aid = 0;
+    uint32_t fpga_clk = 0;
+
+    /* Initialise the LEDs to match the current switch positions. */
+    MPS3_FPGAIO->LED = MPS3_FPGAIO->SWITCHES & 0xFF;
+#endif
+
+    /* UART init - enables valid use of printf (stdout is
+     * redirected to this UART, UART0). */
+    UartStdOutInit();
+    info("Processor internal clock: %u Hz\n", GetSystemCoreClock());
+
+#if defined(MPS3_PLATFORM)
+    /* Get revision information from various registers */
+    rev = MPS3_SCC->CFG_REG4;
+    fpgaid = MPS3_SCC->SCC_ID;
+    aid = MPS3_SCC->SCC_AID;
+    apnote = EXTRACT_BITS(fpgaid, 15, 4);
+    fpga_clk = GetMPS3CoreClock();
+
+    info("V2M-MPS3 revision %c\n\n", rev + 'A');
+    info("Application Note AN%x, Revision %c\n", apnote,
+        EXTRACT_BITS(aid, 23, 20) + 'A');
+    info("MPS3 build %d\n", EXTRACT_BITS(aid, 31, 24));
+    info("MPS3 core clock has been set to: %d Hz\n", fpga_clk);
+
+    /* Display CPU ID */
+    id = SCB->CPUID;
+    info("CPU ID: 0x%08x\n", id);
+
+    if(EXTRACT_BITS(id, 15, 8) == 0xD2) {
+        if (EXTRACT_BITS(id, 7, 4) == 2) {
+            info ("CPU: Cortex-M55 r%dp%d\n\n",
+                EXTRACT_BITS(id, 23, 20),EXTRACT_BITS(id, 3, 0));
+#if defined (CPU_CORTEX_M55)
+            /* CPU ID should be "0x_41_0f_d2_20" for Cortex-M55 */
+            return 0;
+#endif /* CPU_CORTEX_M55 */
+        } else if (EXTRACT_BITS(id, 7, 4) == 1) {
+            info ("CPU: Cortex-M33 r%dp%d\n\n",
+                EXTRACT_BITS(id, 23, 20),EXTRACT_BITS(id, 3, 0));
+#if defined (CPU_CORTEX_M33)
+            return 0;
+#endif /* CPU_CORTEX_M33 */
+        } else if (EXTRACT_BITS(id, 7, 4) == 0) {
+            info ("CPU: Cortex-M23 r%dp%d\n\n",
+                EXTRACT_BITS(id, 23, 20),EXTRACT_BITS(id, 3, 0));
+        } else {
+            info ("CPU: Cortex-M processor family");
+        }
+    } else if (EXTRACT_BITS(id, 15, 8) == 0xC6) {
+        info ("CPU: Cortex-M%d+ r%dp%d\n\n",
+            EXTRACT_BITS(id, 7, 4), EXTRACT_BITS(id, 23, 20),
+            EXTRACT_BITS(id, 3, 0));
+    } else {
+        info ("CPU: Cortex-M%d r%dp%d\n\n",
+            EXTRACT_BITS(id, 7, 4), EXTRACT_BITS(id, 23, 20),
+            EXTRACT_BITS(id, 3, 0));
+    }
+#else /* MPS3_PLATFORM */
+
+    info("ARM model environment ready..\n");
+    return 0;
+#endif /* MPS3_PLATFORM */
+
+    /* If the CPU detected does not match the CPU this build was
+     * configured for, return an error. */
+    printf_err("CPU mismatch!\n");
+    return 1;
+}
+
+void system_release(void)
+{
+    __disable_irq();
+}
+
+void system_name(char* name, size_t size)
+{
+#if defined (MPS3_PLATFORM)
+    strncpy(name, "mps3-bare", size);
+#else /* MPS3_PLATFORM */
+    strncpy(name, "FVP", size);
+#endif /* MPS3_PLATFORM */
+}
\ No newline at end of file
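For reference, the bit-field helpers above decode the CPUID register as in the illustrative breakdown below; the example value is the Cortex-M55 ID quoted in the code comment:

    /* EXTRACT_BITS re-stated here for illustration only. */
    #define CREATE_MASK(msb, lsb)        (((1U << ((msb) - (lsb) + 1)) - 1) << (lsb))
    #define MASK_BITS(arg, msb, lsb)     ((arg) & CREATE_MASK(msb, lsb))
    #define EXTRACT_BITS(arg, msb, lsb)  (MASK_BITS(arg, msb, lsb) >> (lsb))

    /* id = 0x410FD220 (Cortex-M55 r0p0):
     *   EXTRACT_BITS(id, 15,  8) == 0xD2  ->  Cortex-M23/M33/M55 part number prefix
     *   EXTRACT_BITS(id,  7,  4) == 0x2   ->  M55 (0x1 => M33, 0x0 => M23)
     *   EXTRACT_BITS(id, 23, 20) == 0x0   ->  variant  (r0)
     *   EXTRACT_BITS(id,  3,  0) == 0x0   ->  revision (p0)
     */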
diff --git a/source/application/hal/platforms/native/data_acquisition/data_acq.c b/source/application/hal/platforms/native/data_acquisition/data_acq.c
new file mode 100644
index 0000000..01f47fa
--- /dev/null
+++ b/source/application/hal/platforms/native/data_acquisition/data_acq.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "data_acq.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+/**
+ * @brief   Initialise the acquisition channel.
+ * @return  0 if successful, error code otherwise.
+ **/
+static int acquisition_init(void)
+{
+    return 0;
+}
+
+/**
+ * @brief           Get the user input from stdin.
+ * @param[out]      user_input  String read from the stdin.
+ * @param[in,out]   size        String read length.
+ * @return          0 if successful, error code otherwise.
+ **/
+static int get_user_input(char* user_input, int size)
+{
+    if (NULL == fgets(user_input, size, stdin)) {
+        return 1;
+    }
+    return 0;
+}
+
+int data_acq_channel_init(data_acq_module *module)
+{
+    assert(module);
+
+    module->system_init = acquisition_init;
+    module->get_input = get_user_input;
+    strncpy(module->system_name, "native",
+            sizeof(module->system_name));
+    module->inited = !module->system_init();
+    return !module->inited;
+}
+
+int data_acq_channel_release(data_acq_module *module)
+{
+    assert(module);
+    module->inited = 0;
+    return 0;
+}
diff --git a/source/application/hal/platforms/native/data_presentation/data_psn.c b/source/application/hal/platforms/native/data_presentation/data_psn.c
new file mode 100644
index 0000000..fe4bcfa
--- /dev/null
+++ b/source/application/hal/platforms/native/data_presentation/data_psn.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "data_psn.h"
+
+#include "log.h"
+
+#include <assert.h>
+#include <string.h>
+
+int data_psn_system_init(data_psn_module *module)
+{
+    assert(module);
+
+    module->system_init = log_psn_init;
+    module->present_data_image = log_display_image;
+    module->present_data_text = log_display_text;
+    module->present_box = log_display_box_icon;
+    module->set_text_color = log_set_text_color;
+    module->clear = log_clear;
+    strncpy(module->system_name, "log_psn", sizeof(module->system_name));
+    module->inited =  !module->system_init();
+    return !module->inited;
+}
+
+int data_psn_system_release(data_psn_module *module)
+{
+    /* Nothing platform specific to release here. */
+    assert(module);
+    module->inited = 0;
+    return 0;
+}
diff --git a/source/application/hal/platforms/native/data_presentation/log/include/log.h b/source/application/hal/platforms/native/data_presentation/log/include/log.h
new file mode 100644
index 0000000..10cf303
--- /dev/null
+++ b/source/application/hal/platforms/native/data_presentation/log/include/log.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef NATIVE_LOG_H
+#define NATIVE_LOG_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+/**
+ * @brief   Data presentation initialiser.
+ * @return  0 if successful, non-zero otherwise.
+ **/
+int log_psn_init(void);
+
+/**
+ * @brief       Log parameters for the image to be displayed.
+ * @param[in]   data        Image pointer.
+ * @param[in]   width       Image width.
+ * @param[in]   height      Image height.
+ * @param[in]   channels    Number of channels.
+ * @param[in]   pos_x       Screen position x co-ordinate.
+ * @param[in]   pos_y       Screen position y co-ordinate.
+ * @param[in]   downsample_factor   Factor by which the image needs to be
+ *                                  down-sampled.
+ * @return      0 if successful, non-zero otherwise.
+ **/
+int log_display_image(uint8_t* data, const uint32_t width,
+                      const uint32_t height, const uint32_t channels,
+                      const uint32_t pos_x, const uint32_t pos_y,
+                      const uint32_t downsample_factor);
+
+/**
+ * @brief       Log the parameters for text to be displayed.
+ * @param[in]   str         Pointer to a null terminated string.
+ * @param[in]   str_sz      Length of the string.
+ * @param[in]   pos_x       Screen position x co-ordinate.
+ * @param[in]   pos_y       Screen position y co-ordinate.
+ * @param[in]   allow_multiple_lines    Whether the text may spread across
+ *                                      multiple lines (unused by this logger).
+ * @return      0 if successful, non-zero otherwise.
+ **/
+int log_display_text(const char* str, const size_t str_sz,
+                     const uint32_t pos_x, const uint32_t pos_y,
+                     const bool allow_multiple_lines);
+
+/**
+ * @brief       Log parameters for the box to be displayed.
+ * @param[in]   pos_x       Screen position x co-ordinate.
+ * @param[in]   pos_y       Screen position y co-ordinate.
+ * @param[in]   width       Width.
+ * @param[in]   height      Height.
+ * @param[in]   color       Fill color.
+ * @return      0 if successful, non-zero otherwise.
+ **/
+int log_display_box_icon(const uint32_t pos_x, const uint32_t pos_y,
+                         const uint32_t width, const uint32_t height, const uint16_t color);
+
+/**
+ * @brief       Logs the colour with which the display
+ *              is to be cleared.
+ * @param[in]   color       Fill color.
+ * @return      0 if successful, non-zero otherwise.
+ **/
+int log_clear(const uint16_t color);
+
+/**
+ * @brief       Logs the text color to be set.
+ * @param[in]   color       Fill color.
+ * @return 0 if successful, non-zero otherwise.
+ **/
+int log_set_text_color (const uint16_t color);
+
+#endif /* NATIVE_LOG_H */
\ No newline at end of file
diff --git a/source/application/hal/platforms/native/data_presentation/log/log.c b/source/application/hal/platforms/native/data_presentation/log/log.c
new file mode 100644
index 0000000..48e8b95
--- /dev/null
+++ b/source/application/hal/platforms/native/data_presentation/log/log.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "log.h"
+
+#include "dummy_log.h"
+
+#include <stdint.h>
+
+int log_psn_init(void)
+{
+    return 0;
+}
+
+int log_display_image(uint8_t* data, const uint32_t width,
+                      const uint32_t height, const uint32_t channels,
+                      const uint32_t pos_x, const uint32_t pos_y,
+                      const uint32_t downsample_factor)
+{
+    info("Image details\n");
+    info("Data:                 %p\n", data);
+    info("WxHxC:                %dx%dx%d\n", width, height, channels);
+    info("Pos (x,y):            (%d,%d)\n", pos_x, pos_y);
+    info("Downsampling factor:  %u\n", downsample_factor);
+    return 0;
+}
+
+int log_display_text(const char* str, const size_t str_sz,
+                     const uint32_t pos_x, const uint32_t pos_y,
+                     const bool allow_multiple_lines)
+{
+    UNUSED(allow_multiple_lines);
+    info("%s\n", str);
+    info("Text size: %lu, x: %d, y: %d\n", str_sz, pos_x, pos_y);
+    return 0;
+}
+
+
+int log_display_box_icon(const uint32_t pos_x, const uint32_t pos_y,
+                         const uint32_t width, const uint32_t height, 
+                         const uint16_t color)
+{
+    info("Showing rectangular, width: %d, height: %d, color: %d, x: %d, y: %d\n", 
+            width, height, color, pos_x, pos_y);
+    return 0;
+}
+
+int log_clear(const uint16_t color)
+{
+    info("Clearing with color: %d\n", color);
+    return 0;
+}
+
+int log_set_text_color (const uint16_t color)
+{
+    info("Setting text color: %d\n", color);
+    return 0;
+}
diff --git a/source/application/hal/platforms/native/timer/include/native_timer.h b/source/application/hal/platforms/native/timer/include/native_timer.h
new file mode 100644
index 0000000..df7b493
--- /dev/null
+++ b/source/application/hal/platforms/native/timer/include/native_timer.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef TIMER_H
+#define TIMER_H
+
+#include <stdint.h>
+#include <time.h>
+
+/* Container for time struct */
+typedef struct _time_counter {
+    /* Current POSIX time in secs. */
+    time_t current_secs;
+    /* Nanoseconds expired in current second. */
+    time_t current_nsecs;
+} time_counter;
+
+#endif /* TIMER_H */
\ No newline at end of file
diff --git a/source/application/hal/platforms/native/timer/native_timer.cc b/source/application/hal/platforms/native/timer/native_timer.cc
new file mode 100644
index 0000000..c115f4d
--- /dev/null
+++ b/source/application/hal/platforms/native/timer/native_timer.cc
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "timer.h"
+
+#include <cassert>
+#include <ctime>
+#include <cstring>
+
+#define MILLISECONDS_IN_SECOND      1000
+#define MICROSECONDS_IN_SECOND      1000000
+#define NANOSECONDS_IN_MILLISECOND  1000000
+#define NANOSECONDS_IN_MICROSECOND  1000
+
+/**
+ * @brief   Gets the current time counter value.
+ * @return  Counter value expressed in terms of time_counter struct.
+ **/
+static time_counter get_time_counter(void)
+{
+    struct timespec current_time{};
+    clock_gettime(CLOCK_MONOTONIC, &current_time);
+    time_counter t = {
+        .current_secs = current_time.tv_sec,
+        .current_nsecs = current_time.tv_nsec
+    };
+
+    return t;
+}
+
+/**
+ * @brief       Gets the time duration elapsed between start and end.
+ * @param[in]   start   Pointer to time_counter value at start time.
+ * @param[in]   end     Pointer to time_counter value at end.
+ * @return      Difference in milliseconds between the arguments,
+ *              expressed as a time_t value.
+ **/
+static time_t get_duration_milliseconds(time_counter *start, time_counter *end)
+{
+    /* Convert both parts of time struct to ms then add for complete time. */
+    time_t seconds_part =
+        (end->current_secs - start->current_secs) * MILLISECONDS_IN_SECOND;
+    time_t nanoseconds_part =
+        (end->current_nsecs - start->current_nsecs) / NANOSECONDS_IN_MILLISECOND;
+
+    return seconds_part + nanoseconds_part;
+}
+
+/**
+ * @brief       Gets the time duration elapsed between start and end.
+ * @param[in]   start   Pointer to time_counter value at start time.
+ * @param[in]   end     Pointer to time_counter value at end.
+ * @return      Difference in microseconds between the arguments,
+ *              expressed as a time_t value.
+ **/
+static time_t get_duration_microseconds(time_counter *start, time_counter *end)
+{
+    /* Convert both parts of time struct to us then add for complete time. */
+    time_t seconds_part =
+        (end->current_secs - start->current_secs) * MICROSECONDS_IN_SECOND;
+    time_t nanoseconds_part =
+        (end->current_nsecs - start->current_nsecs) / NANOSECONDS_IN_MICROSECOND;
+
+    return seconds_part + nanoseconds_part;
+}
+
+/**
+ * @brief Stub for timer reset.
+ **/
+void reset_timer() {}
+
+/**
+ * @brief Initialise the timer for this platform.
+ **/
+void init_timer(platform_timer *timer)
+{
+    assert(timer);
+    memset(timer, 0, sizeof(*timer));
+
+    timer->get_time_counter = get_time_counter;
+    timer->start_profiling = get_time_counter;
+    timer->stop_profiling = get_time_counter;
+    timer->get_duration_ms = get_duration_milliseconds;
+    timer->cap.duration_ms = 1;
+    timer->get_duration_us = get_duration_microseconds;
+    timer->cap.duration_us = 1;
+    timer->reset = reset_timer;
+    timer->inited = 1;
+}
+
+#ifdef __cplusplus
+}
+#endif
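The duration helpers above rely on the nanoseconds part being allowed to go negative when the end timestamp's tv_nsec is smaller than the start's; the signed sum still yields the correct total. A worked example with illustrative values:

    /* start = { 1 s, 900 000 000 ns },  end = { 2 s, 100 000 000 ns }  =>  0.2 s elapsed
     *
     *   seconds_part     = (2 - 1) * 1000                      =  1000 ms
     *   nanoseconds_part = (100000000 - 900000000) / 1000000   =  -800 ms
     *   total            = 1000 + (-800)                       =   200 ms
     */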
diff --git a/source/application/hal/platforms/native/utils/include/dummy_log.h b/source/application/hal/platforms/native/utils/include/dummy_log.h
new file mode 100644
index 0000000..626436a
--- /dev/null
+++ b/source/application/hal/platforms/native/utils/include/dummy_log.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef DUMMY_LOG_H
+#define DUMMY_LOG_H
+
+#include <stdio.h>
+
+#define LOG_LEVEL_TRACE       0
+#define LOG_LEVEL_DEBUG       1
+#define LOG_LEVEL_INFO        2
+#define LOG_LEVEL_WARN        3
+#define LOG_LEVEL_ERROR       4
+
+#ifndef LOG_LEVEL
+#define LOG_LEVEL             LOG_LEVEL_INFO
+#endif /*LOG_LEVEL*/
+
+#define UNUSED(x)       ((void)(x))
+
+#if (LOG_LEVEL == LOG_LEVEL_TRACE)
+    #define trace(...)        do { printf("[TRACE] "); printf(__VA_ARGS__); } while (0)
+#else
+    #define trace(...)
+#endif  /* LOG_LEVEL == LOG_LEVEL_TRACE */
+
+#if (LOG_LEVEL <= LOG_LEVEL_DEBUG)
+    #define debug(...)        do { printf("[DEBUG] "); printf(__VA_ARGS__); } while (0)
+#else
+    #define debug(...)
+#endif  /* LOG_LEVEL <= LOG_LEVEL_DEBUG */
+
+#if (LOG_LEVEL <= LOG_LEVEL_INFO)
+    #define info(...)         do { printf("[INFO] "); printf(__VA_ARGS__); } while (0)
+#else
+    #define info(...)
+#endif  /* LOG_LEVEL <= LOG_LEVEL_INFO */
+
+#if (LOG_LEVEL <= LOG_LEVEL_WARN)
+    #define warn(...)         do { printf("[WARN] "); printf(__VA_ARGS__); } while (0)
+#else
+    #define warn(...)
+#endif  /* LOG_LEVEL <= LOG_LEVEL_WARN */
+
+#if (LOG_LEVEL <= LOG_LEVEL_ERROR)
+    #define printf_err(...)   do { printf("[ERROR] "); printf(__VA_ARGS__); } while (0)
+#else
+    #define printf_err(...)
+#endif  /* LOG_LEVEL <= LOG_LEVEL_ERROR */
+
+#endif /* DUMMY_LOG_H */
\ No newline at end of file
diff --git a/source/application/hal/platforms/native/utils/include/system_init.h b/source/application/hal/platforms/native/utils/include/system_init.h
new file mode 100644
index 0000000..80b1bb2
--- /dev/null
+++ b/source/application/hal/platforms/native/utils/include/system_init.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef NATIVE_SYSTEM_INIT_H
+#define NATIVE_SYSTEM_INIT_H
+
+#include "dummy_log.h"
+
+/**
+ * @brief  Platform initialisation for native platform.
+ **/
+int system_init(void);
+
+/**
+ * @brief  Platform release for native platform.
+ **/
+void system_release(void);
+
+/**
+ * @brief       Returns the name of the platform.
+ * @param[out]  name Platform name string.
+ * @param[in]   size Name string length.
+ */
+void system_name(char* name, size_t size);
+
+#endif /* NATIVE_SYSTEM_INIT_H */
diff --git a/source/application/hal/platforms/native/utils/system_init.c b/source/application/hal/platforms/native/utils/system_init.c
new file mode 100644
index 0000000..8e0b768
--- /dev/null
+++ b/source/application/hal/platforms/native/utils/system_init.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "system_init.h"
+
+#include <string.h>
+
+int system_init(void)
+{
+    return 0;
+}
+
+void system_release(void)
+{}
+
+void system_name(char* name, size_t size)
+{
+    strncpy(name, "native", size);
+}
\ No newline at end of file
diff --git a/source/application/main/Classifier.cc b/source/application/main/Classifier.cc
new file mode 100644
index 0000000..bc2c378
--- /dev/null
+++ b/source/application/main/Classifier.cc
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Classifier.hpp"
+
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+
+#include <vector>
+#include <string>
+#include <set>
+#include <cstdint>
+
+namespace arm {
+namespace app {
+
+    template<typename T>
+    bool Classifier::_GetTopNResults(TfLiteTensor* tensor,
+                         std::vector<ClassificationResult>& vecResults,
+                         uint32_t topNCount,
+                         const std::vector <std::string>& labels)
+    {
+        std::set<std::pair<T, uint32_t>> sortedSet;
+
+        /* NOTE: Verifying the tensor's size against the labels is the
+         *       responsibility of the calling/public function. */
+        T* tensorData = tflite::GetTensorData<T>(tensor);
+
+        /* Set initial elements. */
+        for (uint32_t i = 0; i < topNCount; ++i) {
+            sortedSet.insert({tensorData[i], i});
+        }
+
+        /* Initialise iterator. */
+        auto setFwdIter = sortedSet.begin();
+
+        /* Scan through the rest of elements with compare operations. */
+        for (uint32_t i = topNCount; i < labels.size(); ++i) {
+            if (setFwdIter->first < tensorData[i]) {
+                sortedSet.erase(*setFwdIter);
+                sortedSet.insert({tensorData[i], i});
+                setFwdIter = sortedSet.begin();
+            }
+        }
+
+        /* Final results' container. */
+        vecResults = std::vector<ClassificationResult>(topNCount);
+
+        /* For getting the floating point values, we need quantization parameters. */
+        QuantParams quantParams = GetTensorQuantParams(tensor);
+
+        /* Reset the iterator to the largest element - use reverse iterator. */
+        auto setRevIter = sortedSet.rbegin();
+
+        /* Populate results
+         * Note: we could combine this loop with the loop above, but that
+         *       would involve more multiplications and other operations.
+         **/
+        for (size_t i = 0; i < vecResults.size(); ++i, ++setRevIter) {
+            double score = static_cast<int> (setRevIter->first);
+            vecResults[i].m_normalisedVal = quantParams.scale *
+                                         (score - quantParams.offset);
+            vecResults[i].m_label = labels[setRevIter->second];
+            vecResults[i].m_labelIdx = setRevIter->second;
+        }
+
+        return true;
+    }
+
+    template<>
+    bool Classifier::_GetTopNResults<float>(TfLiteTensor* tensor,
+                                     std::vector<ClassificationResult>& vecResults,
+                                     uint32_t topNCount,
+                                     const std::vector <std::string>& labels)
+    {
+        std::set<std::pair<float, uint32_t>> sortedSet;
+
+        /* NOTE: Verifying the tensor's size against the labels is the
+         *       responsibility of the calling/public function. */
+        float* tensorData = tflite::GetTensorData<float>(tensor);
+
+        /* Set initial elements. */
+        for (uint32_t i = 0; i < topNCount; ++i) {
+            sortedSet.insert({tensorData[i], i});
+        }
+
+        /* Initialise iterator. */
+        auto setFwdIter = sortedSet.begin();
+
+        /* Scan through the rest of elements with compare operations. */
+        for (uint32_t i = topNCount; i < labels.size(); ++i) {
+            if (setFwdIter->first < tensorData[i]) {
+                sortedSet.erase(*setFwdIter);
+                sortedSet.insert({tensorData[i], i});
+                setFwdIter = sortedSet.begin();
+            }
+        }
+
+        /* Final results' container. */
+        vecResults = std::vector<ClassificationResult>(topNCount);
+
+        /* Reset the iterator to the largest element - use reverse iterator. */
+        auto setRevIter = sortedSet.rbegin();
+
+        /* Populate results
+         * Note: we could combine this loop with the loop above, but that
+         *       would involve more multiplications and other operations.
+         **/
+        for (size_t i = 0; i < vecResults.size(); ++i, ++setRevIter) {
+            vecResults[i].m_normalisedVal = setRevIter->first;
+            vecResults[i].m_label = labels[setRevIter->second];
+            vecResults[i].m_labelIdx = setRevIter->second;
+        }
+
+        return true;
+    }
+
+    template bool  Classifier::_GetTopNResults<uint8_t>(TfLiteTensor* tensor,
+                                           std::vector<ClassificationResult>& vecResults,
+                                           uint32_t topNCount, const std::vector <std::string>& labels);
+
+    template bool  Classifier::_GetTopNResults<int8_t>(TfLiteTensor* tensor,
+                                          std::vector<ClassificationResult>& vecResults,
+                                          uint32_t topNCount, const std::vector <std::string>& labels);
+
+    bool  Classifier::GetClassificationResults(
+        TfLiteTensor* outputTensor,
+        std::vector<ClassificationResult>& vecResults,
+        const std::vector <std::string>& labels, uint32_t topNCount)
+    {
+        if (outputTensor == nullptr) {
+            printf_err("Output vector is null pointer.\n");
+            return false;
+        }
+
+        uint32_t totalOutputSize = 1;
+        for (int inputDim = 0; inputDim < outputTensor->dims->size; inputDim++){
+            totalOutputSize *= outputTensor->dims->data[inputDim];
+        }
+
+        /* Sanity checks. */
+        if (totalOutputSize < topNCount) {
+            printf_err("Output vector is smaller than %u\n", topNCount);
+            return false;
+        } else if (totalOutputSize != labels.size()) {
+            printf_err("Output size doesn't match the labels' size\n");
+            return false;
+        }
+
+        bool resultState;
+        vecResults.clear();
+
+        /* Get the top N results. */
+        switch (outputTensor->type) {
+            case kTfLiteUInt8:
+                resultState = _GetTopNResults<uint8_t>(outputTensor, vecResults, topNCount, labels);
+                break;
+            case kTfLiteInt8:
+                resultState = _GetTopNResults<int8_t>(outputTensor, vecResults, topNCount, labels);
+                break;
+            case kTfLiteFloat32:
+                resultState = _GetTopNResults<float>(outputTensor, vecResults, topNCount, labels);
+                break;
+            default:
+                printf_err("Tensor type %s not supported by classifier\n", TfLiteTypeGetName(outputTensor->type));
+                return false;
+        }
+
+        if (!resultState) {
+            printf_err("Failed to get sorted set\n");
+            return false;
+        }
+
+        return true;
+    }
+
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
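Illustrative usage of the classifier above (not part of the patch). The output tensor and labels vector are assumed to come from use-case code, e.g. a model's output tensor accessor and a generated labels list; info() and printf_err() come from the HAL logging macros.

    #include "Classifier.hpp"
    #include "hal.h"

    #include <string>
    #include <vector>

    static void PrintTop3(TfLiteTensor* outputTensor,
                          const std::vector<std::string>& labels)
    {
        arm::app::Classifier classifier;
        std::vector<arm::app::ClassificationResult> results;

        /* Ask for the top 3 classes; this fails if the tensor holds fewer
         * than 3 elements or does not match the label count. */
        if (!classifier.GetClassificationResults(outputTensor, results, labels, 3)) {
            printf_err("Classification failed\n");
            return;
        }

        for (const auto& result : results) {
            info("%u: %s (%f)\n", result.m_labelIdx,
                 result.m_label.c_str(), result.m_normalisedVal);
        }
    }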
diff --git a/source/application/main/Main.cc b/source/application/main/Main.cc
new file mode 100644
index 0000000..6e1c620
--- /dev/null
+++ b/source/application/main/Main.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/****************************************************************************\
+ *               Main application file for ARM NPU on MPS3 board             *
+\****************************************************************************/
+
+#include "hal.h"                    /* our hardware abstraction api */
+#include "TensorFlowLiteMicro.hpp"  /* our inference logic api */
+
+#include <cstdio>
+
+extern void main_loop(hal_platform& platform);
+
+#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+__ASM(" .global __ARM_use_no_argv\n");
+#endif
+
+/* Print application information. */
+static void print_application_intro()
+{
+    info("%s\n", PRJ_DES_STR);
+    info("Target system design: %s\n", DESIGN_NAME);
+    info("Version %s Build date: " __DATE__ " @ " __TIME__ "\n", PRJ_VER_STR);
+    info("Copyright (C) ARM Ltd 2020. All rights reserved.\n\n");
+}
+
+int main ()
+{
+    hal_platform    platform;
+    data_acq_module dataAcq;
+    data_psn_module dataPsn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &dataAcq, &dataPsn, &timer);
+
+    if (0 == hal_platform_init(&platform)) {
+        /* Application information, UART should have been initialised. */
+        print_application_intro();
+
+        /* Check the version of TensorFlow Lite Micro. */
+        PrintTensorFlowVersion();
+
+        /* Run the application. */
+        main_loop(platform);
+    }
+
+    /* Only reached if platform initialisation fails or the main loop returns. */
+    info("program terminating...\n");
+
+    /* Release platform. */
+    hal_platform_release(&platform);
+    return 0;
+}
+
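main_loop() is declared above but supplied by each use case. A schematic sketch of its shape follows (not part of the patch; everything other than the main_loop signature and ApplicationContext is a placeholder for use-case specific code).

    #include "hal.h"
    #include "AppContext.hpp"

    void main_loop(hal_platform& platform)
    {
        arm::app::ApplicationContext caseContext;
        caseContext.Set<hal_platform&>("platform", platform);

        /* A real use case would also create and initialise its model here,
         * store it in the context, and then dispatch to a handler, e.g.:
         *   caseContext.Set<arm::app::Model&>("model", model);
         *   SomeUseCaseHandler(caseContext);
         */
        info("main loop terminated\n");
    }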
diff --git a/source/application/main/Mfcc.cc b/source/application/main/Mfcc.cc
new file mode 100644
index 0000000..bf16159
--- /dev/null
+++ b/source/application/main/Mfcc.cc
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Mfcc.hpp"
+
+#include "PlatformMath.hpp"
+
+#include <cfloat>
+#include <cmath>
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    MfccParams::MfccParams(
+                    const float samplingFreq,
+                    const uint32_t numFbankBins,
+                    const float melLoFreq,
+                    const float melHiFreq,
+                    const uint32_t numMfccFeats,
+                    const uint32_t frameLen,
+                    const bool useHtkMethod):
+                        m_samplingFreq(samplingFreq),
+                        m_numFbankBins(numFbankBins),
+                        m_melLoFreq(melLoFreq),
+                        m_melHiFreq(melHiFreq),
+                        m_numMfccFeatures(numMfccFeats),
+                        m_frameLen(frameLen),
+
+                        /* Smallest power of 2 >= frame length. */
+                        m_frameLenPadded(pow(2, ceil((log(frameLen)/log(2))))),
+                        m_useHtkMethod(useHtkMethod)
+    {}
+
+    std::string MfccParams::Str()
+    {
+        char strC[1024];
+        snprintf(strC, sizeof(strC) - 1, "\n   \
+            \n\t Sampling frequency:         %f\
+            \n\t Number of filter banks:     %u\
+            \n\t Mel frequency limit (low):  %f\
+            \n\t Mel frequency limit (high): %f\
+            \n\t Number of MFCC features:    %u\
+            \n\t Frame length:               %u\
+            \n\t Padded frame length:        %u\
+            \n\t Using HTK for Mel scale:    %s\n",
+                this->m_samplingFreq, this->m_numFbankBins, this->m_melLoFreq,
+                this->m_melHiFreq, this->m_numMfccFeatures, this->m_frameLen,
+                this->m_frameLenPadded, this->m_useHtkMethod ? "yes" : "no");
+        return std::string{strC};
+    }
+
+    MFCC::MFCC(const MfccParams& params):
+        _m_params(params),
+        _m_filterBankInitialised(false)
+    {
+        this->_m_buffer = std::vector<float>(
+                            this->_m_params.m_frameLenPadded, 0.0);
+        this->_m_frame = std::vector<float>(
+                            this->_m_params.m_frameLenPadded, 0.0);
+        this->_m_melEnergies = std::vector<float>(
+                                this->_m_params.m_numFbankBins, 0.0);
+
+        this->_m_windowFunc = std::vector<float>(this->_m_params.m_frameLen);
+        const float multiplier = 2 * M_PI / this->_m_params.m_frameLen;
+
+        /* Create window function. */
+        for (size_t i = 0; i < this->_m_params.m_frameLen; i++) {
+            this->_m_windowFunc[i] = (0.5 - (0.5 *
+                math::MathUtils::CosineF32(static_cast<float>(i) * multiplier)));
+        }
+
+        math::MathUtils::FftInitF32(this->_m_params.m_frameLenPadded, this->_m_fftInstance);
+        debug("Instantiated MFCC object: %s\n", this->_m_params.Str().c_str());
+    }
+
+    void MFCC::Init()
+    {
+        this->_InitMelFilterBank();
+    }
+
+    float MFCC::MelScale(const float freq, const bool useHTKMethod)
+    {
+        if (useHTKMethod) {
+            return 1127.0f * logf (1.0f + freq / 700.0f);
+        } else {
+            /* Slaney formula for mel scale. */
+
+            float mel = freq / ms_freqStep;
+
+            if (freq >= ms_minLogHz) {
+                mel = ms_minLogMel + logf(freq / ms_minLogHz) / ms_logStep;
+            }
+            return mel;
+        }
+    }
+
+    float MFCC::InverseMelScale(const float melFreq, const bool useHTKMethod)
+    {
+        if (useHTKMethod) {
+            return 700.0f * (expf (melFreq / 1127.0f) - 1.0f);
+        } else {
+            /* Slaney formula for mel scale. */
+            float freq = ms_freqStep * melFreq;
+
+            if (melFreq >= ms_minLogMel) {
+                freq = ms_minLogHz * expf(ms_logStep * (melFreq - ms_minLogMel));
+            }
+            return freq;
+        }
+    }
+
+
+    bool MFCC::ApplyMelFilterBank(
+            std::vector<float>&                 fftVec,
+            std::vector<std::vector<float>>&    melFilterBank,
+            std::vector<int32_t>&               filterBankFilterFirst,
+            std::vector<int32_t>&               filterBankFilterLast,
+            std::vector<float>&                 melEnergies)
+    {
+        const size_t numBanks = melEnergies.size();
+
+        if (numBanks != filterBankFilterFirst.size() ||
+                numBanks != filterBankFilterLast.size()) {
+            printf_err("unexpected filter bank lengths\n");
+            return false;
+        }
+
+        for (size_t bin = 0; bin < numBanks; ++bin) {
+            auto filterBankIter = melFilterBank[bin].begin();
+            float melEnergy = FLT_MIN;  /* Avoid log of zero at later stages */
+            int32_t firstIndex = filterBankFilterFirst[bin];
+            int32_t lastIndex = filterBankFilterLast[bin];
+
+            for (int i = firstIndex; i <= lastIndex; i++) {
+                float energyRep = math::MathUtils::SqrtF32(fftVec[i]);
+                melEnergy += (*filterBankIter++ * energyRep);
+            }
+
+            melEnergies[bin] = melEnergy;
+        }
+
+        return true;
+    }
+
+    void MFCC::ConvertToLogarithmicScale(std::vector<float>& melEnergies)
+    {
+        for (size_t bin = 0; bin < melEnergies.size(); ++bin) {
+            melEnergies[bin] = logf(melEnergies[bin]);
+        }
+    }
+
+    void MFCC::_ConvertToPowerSpectrum()
+    {
+        const uint32_t halfDim = this->_m_params.m_frameLenPadded / 2;
+
+        /* Handle this special case. */
+        float firstEnergy = this->_m_buffer[0] * this->_m_buffer[0];
+        float lastEnergy = this->_m_buffer[1] * this->_m_buffer[1];
+
+        math::MathUtils::ComplexMagnitudeSquaredF32(
+                            this->_m_buffer.data(),
+                            this->_m_buffer.size(),
+                            this->_m_buffer.data(),
+                            this->_m_buffer.size()/2);
+
+        this->_m_buffer[0] = firstEnergy;
+        this->_m_buffer[halfDim] = lastEnergy;
+    }
+
+    std::vector<float> MFCC::CreateDCTMatrix(
+                                const int32_t inputLength,
+                                const int32_t coefficientCount)
+    {
+        std::vector<float> dctMatrix(inputLength * coefficientCount);
+
+        const float normalizer = math::MathUtils::SqrtF32(2.0f/inputLength);
+        const float angleIncr = M_PI/inputLength;
+        float angle = 0;
+
+        for (int32_t k = 0, m = 0; k < coefficientCount; k++, m += inputLength) {
+            for (int32_t n = 0; n < inputLength; n++) {
+                dctMatrix[m+n] = normalizer *
+                    math::MathUtils::CosineF32((n + 0.5) * angle);
+            }
+            angle += angleIncr;
+        }
+
+        return dctMatrix;
+    }
+
+    float MFCC::GetMelFilterBankNormaliser(
+                    const float&    leftMel,
+                    const float&    rightMel,
+                    const bool      useHTKMethod)
+    {
+        UNUSED(leftMel);
+        UNUSED(rightMel);
+        UNUSED(useHTKMethod);
+
+        /* By default, no normalisation => return 1 */
+        return 1.f;
+    }
+
+    void MFCC::_InitMelFilterBank()
+    {
+        if (!this->_IsMelFilterBankInited()) {
+            this->_m_melFilterBank = this->_CreateMelFilterBank();
+            this->_m_dctMatrix = this->CreateDCTMatrix(
+                                    this->_m_params.m_numFbankBins,
+                                    this->_m_params.m_numMfccFeatures);
+            this->_m_filterBankInitialised = true;
+        }
+    }
+
+    bool MFCC::_IsMelFilterBankInited()
+    {
+        return this->_m_filterBankInitialised;
+    }
+
+    void MFCC::_MfccComputePreFeature(const std::vector<int16_t>& audioData)
+    {
+        this->_InitMelFilterBank();
+
+        /* TensorFlow way of normalizing .wav data to (-1, 1). */
+        constexpr float normaliser = 1.0/(1<<15);
+        for (size_t i = 0; i < this->_m_params.m_frameLen; i++) {
+            this->_m_frame[i] = static_cast<float>(audioData[i]) * normaliser;
+        }
+
+        /* Apply window function to input frame. */
+        for(size_t i = 0; i < this->_m_params.m_frameLen; i++) {
+            this->_m_frame[i] *= this->_m_windowFunc[i];
+        }
+
+        /* Set remaining frame values to 0. */
+        std::fill(this->_m_frame.begin() + this->_m_params.m_frameLen, this->_m_frame.end(), 0);
+
+        /* Compute FFT. */
+        math::MathUtils::FftF32(this->_m_frame, this->_m_buffer, this->_m_fftInstance);
+
+        /* Convert to power spectrum. */
+        this->_ConvertToPowerSpectrum();
+
+        /* Apply mel filterbanks. */
+        if (!this->ApplyMelFilterBank(this->_m_buffer,
+                                      this->_m_melFilterBank,
+                                      this->_m_filterBankFilterFirst,
+                                      this->_m_filterBankFilterLast,
+                                      this->_m_melEnergies)) {
+            printf_err("Failed to apply MEL filter banks\n");
+        }
+
+        /* Convert to logarithmic scale. */
+        this->ConvertToLogarithmicScale(this->_m_melEnergies);
+    }
+
+    std::vector<float> MFCC::MfccCompute(const std::vector<int16_t>& audioData)
+    {
+        this->_MfccComputePreFeature(audioData);
+
+        std::vector<float> mfccOut(this->_m_params.m_numMfccFeatures);
+
+        float * ptrMel = this->_m_melEnergies.data();
+        float * ptrDct = this->_m_dctMatrix.data();
+        float * ptrMfcc = mfccOut.data();
+
+        /* Take DCT. Uses matrix mul. */
+        for (size_t i = 0, j = 0; i < mfccOut.size();
+                    ++i, j += this->_m_params.m_numFbankBins) {
+            *ptrMfcc++ = math::MathUtils::DotProductF32(
+                                            ptrDct + j,
+                                            ptrMel,
+                                            this->_m_params.m_numFbankBins);
+        }
+        return mfccOut;
+    }
+
+    std::vector<std::vector<float>> MFCC::_CreateMelFilterBank()
+    {
+        size_t numFftBins = this->_m_params.m_frameLenPadded / 2;
+        float fftBinWidth = static_cast<float>(this->_m_params.m_samplingFreq) / this->_m_params.m_frameLenPadded;
+
+        float melLowFreq = MFCC::MelScale(this->_m_params.m_melLoFreq,
+                                          this->_m_params.m_useHtkMethod);
+        float melHighFreq = MFCC::MelScale(this->_m_params.m_melHiFreq,
+                                           this->_m_params.m_useHtkMethod);
+        float melFreqDelta = (melHighFreq - melLowFreq) / (this->_m_params.m_numFbankBins + 1);
+
+        std::vector<float> thisBin = std::vector<float>(numFftBins);
+        std::vector<std::vector<float>> melFilterBank(
+                                            this->_m_params.m_numFbankBins);
+        this->_m_filterBankFilterFirst =
+                        std::vector<int32_t>(this->_m_params.m_numFbankBins);
+        this->_m_filterBankFilterLast =
+                        std::vector<int32_t>(this->_m_params.m_numFbankBins);
+
+        for (size_t bin = 0; bin < this->_m_params.m_numFbankBins; bin++) {
+            float leftMel = melLowFreq + bin * melFreqDelta;
+            float centerMel = melLowFreq + (bin + 1) * melFreqDelta;
+            float rightMel = melLowFreq + (bin + 2) * melFreqDelta;
+
+            int32_t firstIndex = -1;
+            int32_t lastIndex = -1;
+            const float normaliser = this->GetMelFilterBankNormaliser(leftMel, rightMel, this->_m_params.m_useHtkMethod);
+
+            for (size_t i = 0; i < numFftBins; i++) {
+                float freq = (fftBinWidth * i);  /* Center freq of this fft bin. */
+                float mel = MFCC::MelScale(freq, this->_m_params.m_useHtkMethod);
+                thisBin[i] = 0.0;
+
+                if (mel > leftMel && mel < rightMel) {
+                    float weight;
+                    if (mel <= centerMel) {
+                        weight = (mel - leftMel) / (centerMel - leftMel);
+                    } else {
+                        weight = (rightMel - mel) / (rightMel - centerMel);
+                    }
+
+                    thisBin[i] = weight * normaliser;
+                    if (firstIndex == -1) {
+                        firstIndex = i;
+                    }
+                    lastIndex = i;
+                }
+            }
+
+            this->_m_filterBankFilterFirst[bin] = firstIndex;
+            this->_m_filterBankFilterLast[bin] = lastIndex;
+
+            /* Copy the part we care about. */
+            for (int32_t i = firstIndex; i <= lastIndex; i++) {
+                melFilterBank[bin].push_back(thisBin[i]);
+            }
+        }
+
+        return melFilterBank;
+    }
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
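Illustrative feature-extraction sketch for the MFCC class above (not part of the patch). The parameter values are examples only; each use case defines its own, and the input frame must contain at least the configured frame length in samples.

    #include "Mfcc.hpp"

    #include <cstdint>
    #include <vector>

    std::vector<float> ExtractMfcc(const std::vector<int16_t>& frame)
    {
        const arm::app::audio::MfccParams params(
            16000,   /* Sampling frequency in Hz. */
            40,      /* Number of Mel filter banks. */
            20,      /* Mel low frequency limit in Hz. */
            4000,    /* Mel high frequency limit in Hz. */
            10,      /* Number of MFCC features per frame. */
            640,     /* Frame length in samples. */
            true);   /* Use the HTK Mel scale formula. */

        arm::app::audio::MFCC mfcc(params);
        mfcc.Init();

        /* frame must hold at least 640 samples for these parameters. */
        return mfcc.MfccCompute(frame);
    }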
diff --git a/source/application/main/PlatformMath.cc b/source/application/main/PlatformMath.cc
new file mode 100644
index 0000000..a9f5049
--- /dev/null
+++ b/source/application/main/PlatformMath.cc
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "PlatformMath.hpp"
+
+#if 0 == ARM_DSP_AVAILABLE
+    #include <cmath>
+    #include <numeric>
+#endif /* 0 == ARM_DSP_AVAILABLE */
+
+namespace arm {
+namespace app {
+namespace math {
+
+    float MathUtils::CosineF32(float radians)
+    {
+#if ARM_DSP_AVAILABLE
+        return arm_cos_f32(radians);
+#else /* ARM_DSP_AVAILABLE */
+        return cos(radians);
+#endif /* ARM_DSP_AVAILABLE */
+    }
+
+    float MathUtils::SqrtF32(float input)
+    {
+#if ARM_DSP_AVAILABLE
+        float output = 0.f;
+        arm_sqrt_f32(input, &output);
+        return output;
+#else /* ARM_DSP_AVAILABLE */
+        return sqrtf(input);
+#endif /* ARM_DSP_AVAILABLE */
+    }
+
+    float MathUtils::MeanF32(float* ptrSrc, const uint32_t srcLen)
+    {
+        if (!srcLen) {
+            return 0.f;
+        }
+
+#if ARM_DSP_AVAILABLE
+        float result = 0.f;
+        arm_mean_f32(ptrSrc, srcLen, &result);
+        return result;
+#else /* ARM_DSP_AVAILABLE */
+        float acc = std::accumulate(ptrSrc, ptrSrc + srcLen, 0.0);
+        return acc/srcLen;
+#endif /* ARM_DSP_AVAILABLE */
+    }
+
+    float MathUtils::StdDevF32(float* ptrSrc, const uint32_t srcLen,
+                               const float mean)
+    {
+        if (!srcLen) {
+            return 0.f;
+        }
+#if ARM_DSP_AVAILABLE
+        /**
+         * Note Standard deviation calculation can be off
+         * by > 0.01 but less than < 0.1, according to
+         * preliminary findings.
+         **/
+        UNUSED(mean);
+        float stdDev = 0;
+        arm_std_f32(ptrSrc, srcLen, &stdDev);
+        return stdDev;
+#else /* ARM_DSP_AVAILABLE */
+        auto VarianceFunction = [=](float acc, const float value) {
+            return acc + (((value - mean) * (value - mean))/ srcLen);
+        };
+
+        float acc = std::accumulate(ptrSrc, ptrSrc + srcLen, 0.0,
+                                    VarianceFunction);
+
+        return sqrtf(acc);
+#endif /* ARM_DSP_AVAILABLE */
+    }
+
+    bool MathUtils::FftInitF32(const uint16_t fftLen, arm::app::math::FftInstance& fftInstance)
+    {
+#if ARM_DSP_AVAILABLE
+        if (!fftInstance.initialised) {
+            arm_status status = arm_rfft_fast_init_f32(&fftInstance.instance, fftLen);
+
+            if (ARM_MATH_SUCCESS != status) {
+                return false;
+            }
+            fftInstance.initialised = true;
+        }
+#else
+        UNUSED(fftLen);
+        UNUSED(fftInstance);
+#endif /* ARM_DSP_AVAILABLE */
+        return true;
+    }
+
+    void MathUtils::FftF32(std::vector<float>& input,
+                           std::vector<float>& fftOutput,
+                           arm::app::math::FftInstance& fftInstance)
+    {
+#if ARM_DSP_AVAILABLE
+        arm_rfft_fast_f32(&fftInstance.instance, input.data(), fftOutput.data(), 0);
+#else
+        UNUSED(fftInstance);
+        const int inputLength = input.size();
+
+        for (int k = 0; k <= inputLength / 2; k++) {
+            float sumReal = 0, sumImag = 0;
+
+            for (int t = 0; t < inputLength; t++) {
+                float angle = 2 * M_PI * t * k / inputLength;
+                sumReal += input[t] * cosf(angle);
+                sumImag += -input[t] * sinf(angle);
+            }
+
+            /* Arrange output to [real0, realN/2, real1, im1, real2, im2, ...] */
+            if (k == 0) {
+                fftOutput[0] = sumReal;
+            } else if (k == inputLength / 2) {
+                fftOutput[1] = sumReal;
+            } else {
+                fftOutput[k*2] = sumReal;
+                fftOutput[k*2 + 1] = sumImag;
+            }
+        }
+#endif /* ARM_DSP_AVAILABLE */
+    }
+
+    void MathUtils::VecLogarithmF32(std::vector <float>& input,
+                                    std::vector <float>& output)
+    {
+#if ARM_DSP_AVAILABLE
+        arm_vlog_f32(input.data(), output.data(),
+                     output.size());
+#else /* ARM_DSP_AVAILABLE */
+        for (auto in = input.begin(), out = output.begin();
+                in != input.end(); ++in, ++out) {
+            *out = logf(*in);
+        }
+#endif /* ARM_DSP_AVAILABLE */
+    }
+
+    float MathUtils::DotProductF32(float* srcPtrA, float* srcPtrB,
+                                   const uint32_t srcLen)
+    {
+        float output = 0.f;
+
+#if ARM_DSP_AVAILABLE
+        arm_dot_prod_f32(srcPtrA, srcPtrB, srcLen, &output);
+#else /* ARM_DSP_AVAILABLE */
+        for (uint32_t i = 0; i < srcLen; ++i) {
+            output += *srcPtrA++ * *srcPtrB++;
+        }
+#endif /* ARM_DSP_AVAILABLE */
+
+        return output;
+    }
+
+    bool MathUtils::ComplexMagnitudeSquaredF32(float* ptrSrc,
+                                               const uint32_t srcLen,
+                                               float* ptrDst,
+                                               const uint32_t dstLen)
+    {
+        if (dstLen < srcLen/2) {
+            printf_err("dstLen must be greater than srcLen/2");
+            return false;
+        }
+
+#if ARM_DSP_AVAILABLE
+        arm_cmplx_mag_squared_f32(ptrSrc, ptrDst, srcLen/2);
+#else /* ARM_DSP_AVAILABLE */
+        for (uint32_t j = 0; j < srcLen/2; ++j) {
+            const float real = *ptrSrc++;
+            const float im = *ptrSrc++;
+            *ptrDst++ = real*real + im*im;
+        }
+#endif /* ARM_DSP_AVAILABLE */
+        return true;
+    }
+
+} /* namespace math */
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
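Illustrative sketch of the real-FFT helpers above (not part of the patch). The output is packed as [real0, realN/2, real1, imag1, real2, imag2, ...], and ComplexMagnitudeSquaredF32() turns N packed floats into N/2 squared magnitudes, as Mfcc.cc does in-place; the entries for bins 0 and N/2 occupy the first two slots and need the special handling shown in _ConvertToPowerSpectrum(). FftInstance is assumed to be default-constructible, as the guarded `initialised` flag above implies.

    #include "PlatformMath.hpp"

    #include <cstdint>
    #include <vector>

    std::vector<float> PowerSpectrum(std::vector<float>& timeDomain)
    {
        arm::app::math::FftInstance fft;
        arm::app::math::MathUtils::FftInitF32(
            static_cast<uint16_t>(timeDomain.size()), fft);

        std::vector<float> freqDomain(timeDomain.size());
        arm::app::math::MathUtils::FftF32(timeDomain, freqDomain, fft);

        /* N packed floats in, N/2 squared magnitudes out. */
        std::vector<float> power(timeDomain.size() / 2);
        arm::app::math::MathUtils::ComplexMagnitudeSquaredF32(
            freqDomain.data(), freqDomain.size(),
            power.data(), power.size());

        return power;
    }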
diff --git a/source/application/main/Profiler.cc b/source/application/main/Profiler.cc
new file mode 100644
index 0000000..f364759
--- /dev/null
+++ b/source/application/main/Profiler.cc
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Profiler.hpp"
+
+#include <cstring>
+#include <string>
+#include <sstream>
+
+namespace arm {
+namespace app {
+
+    template<class T>
+    static void writeStatLine(std::ostringstream& s,
+                              const char* desc,
+                              T total,
+                              uint32_t samples,
+                              T min,
+                              T max)
+    {
+        s << "\t" << desc << total << " / "
+          << ((double)total / samples) << " / "
+          << min << " / " << max << std::endl;
+    }
+
+    Profiler::Profiler(hal_platform* platform, const char* name = "Unknown")
+    : _m_name(name)
+    {
+        if (platform && platform->inited) {
+            this->_m_pPlatform = platform;
+            this->Reset();
+        } else {
+            printf_err("Profiler %s initialised with invalid platform\n",
+                this->_m_name.c_str());
+        }
+    }
+
+    bool Profiler::StartProfiling(const char* name)
+    {
+        if (name) {
+            this->SetName(name);
+        }
+        if (this->_m_pPlatform && !this->_m_started) {
+            this->_m_pPlatform->timer->reset();
+            this->_m_tstampSt = this->_m_pPlatform->timer->start_profiling();
+            this->_m_started = true;
+            return true;
+        }
+        printf_err("Failed to start profiler %s\n", this->_m_name.c_str());
+        return false;
+    }
+
+    bool Profiler::StopProfiling()
+    {
+        if (this->_m_pPlatform && this->_m_started) {
+            this->_m_tstampEnd = this->_m_pPlatform->timer->stop_profiling();
+            this->_m_started = false;
+
+            this->_AddProfilingUnit(this->_m_tstampSt, this->_m_tstampEnd, this->_m_name);
+
+            return true;
+        }
+        printf_err("Failed to stop profiler %s\n", this->_m_name.c_str());
+        return false;
+    }
+
+    bool Profiler::StopProfilingAndReset()
+    {
+        if (this->StopProfiling()) {
+            this->Reset();
+            return true;
+        }
+        printf_err("Failed to stop profiler %s\n", this->_m_name.c_str());
+        return false;
+    }
+
+    void Profiler::Reset()
+    {
+        this->_m_started = false;
+        memset(&this->_m_tstampSt, 0, sizeof(this->_m_tstampSt));
+        memset(&this->_m_tstampEnd, 0, sizeof(this->_m_tstampEnd));
+    }
+
+    std::string Profiler::GetResultsAndReset()
+    {
+        std::ostringstream strResults;
+
+        for (const auto& item: this->_m_series) {
+            auto name = item.first;
+            ProfilingSeries series = item.second;
+
+            uint32_t samplesNum = series.size();
+
+            uint64_t totalNpuCycles = 0;        /* Total NPU cycles (idle + active). */
+            uint64_t totalActiveNpuCycles = 0;  /* Active NPU cycles. */
+            uint64_t totalCpuCycles = 0;        /* Total CPU cycles. */
+            time_t totalTimeMs = 0;
+
+            uint64_t minActiveNpuCycles = series[0].activeNpuCycles;
+            uint64_t minIdleNpuCycles = series[0].npuCycles - minActiveNpuCycles;
+            uint64_t minActiveCpuCycles = series[0].cpuCycles - minActiveNpuCycles;
+            time_t minTimeMs = series[0].time;
+
+            uint64_t maxIdleNpuCycles = 0;
+            uint64_t maxActiveNpuCycles = 0;
+            uint64_t maxActiveCpuCycles = 0;
+            time_t maxTimeMs = 0;
+
+            for(ProfilingUnit& unit: series){
+                totalNpuCycles += unit.npuCycles;
+                totalActiveNpuCycles += unit.activeNpuCycles;
+                totalCpuCycles += unit.cpuCycles;
+                totalTimeMs += unit.time;
+
+                maxActiveNpuCycles = std::max(maxActiveNpuCycles,
+                                              unit.activeNpuCycles);
+                maxIdleNpuCycles = std::max(maxIdleNpuCycles,
+                                            unit.npuCycles - unit.activeNpuCycles);
+                maxActiveCpuCycles = std::max(maxActiveCpuCycles,
+                                              unit.cpuCycles - unit.activeNpuCycles);
+                maxTimeMs = std::max(maxTimeMs, unit.time);
+
+                minActiveNpuCycles = std::min(minActiveNpuCycles,
+                                              unit.activeNpuCycles);
+                minIdleNpuCycles = std::min(minIdleNpuCycles,
+                                            unit.npuCycles - unit.activeNpuCycles);
+                minActiveCpuCycles = std::min(minActiveCpuCycles,
+                                              unit.cpuCycles - unit.activeNpuCycles);
+                minTimeMs = std::min(minTimeMs, unit.time);
+            }
+
+            strResults << "Profile for " << name << ": " << std::endl;
+
+            if (samplesNum > 1) {
+                strResults << "\tSamples: " << samplesNum << std::endl;
+                strResults << "\t                            Total / Avg./ Min / Max"
+                           << std::endl;
+
+                writeStatLine<uint64_t>(strResults, "Active NPU cycles:          ",
+                                        totalActiveNpuCycles, samplesNum,
+                                        minActiveNpuCycles, maxActiveNpuCycles);
+
+                writeStatLine<uint64_t>(strResults, "Idle NPU cycles:            ",
+                                        (totalNpuCycles - totalActiveNpuCycles),
+                                        samplesNum, minIdleNpuCycles, maxIdleNpuCycles);
+
+#if defined(CPU_PROFILE_ENABLED)
+                writeStatLine<uint64_t>(strResults, "Active CPU cycles (approx): ",
+                                        (totalCpuCycles - totalActiveNpuCycles),
+                                        samplesNum, minActiveCpuCycles,
+                                        maxActiveCpuCycles);
+
+                writeStatLine<time_t>(strResults, "Time in ms:                 ",
+                                        totalTimeMs, samplesNum, minTimeMs, maxTimeMs);
+#endif
+            } else {
+                strResults << "\tActive NPU cycles: " << totalActiveNpuCycles
+                           << std::endl;
+                strResults << "\tIdle NPU cycles:   "
+                           << (totalNpuCycles - totalActiveNpuCycles)
+                           << std::endl;
+#if defined(CPU_PROFILE_ENABLED)
+                strResults << "\tActive CPU cycles: "
+                           << (totalCpuCycles - totalActiveNpuCycles)
+                           << " (approx)" << std::endl;
+
+                strResults << "\tTime in ms:        " << totalTimeMs << std::endl;
+#endif
+            }
+        }
+        this->Reset();
+        return strResults.str();
+    }
+
+    void Profiler::SetName(const char* str)
+    {
+        this->_m_name = std::string(str);
+    }
+
+    void Profiler::_AddProfilingUnit(time_counter start, time_counter end,
+                                     const std::string& name)
+    {
+        platform_timer * timer = this->_m_pPlatform->timer;
+
+        ProfilingUnit unit{};
+
+        if (timer->cap.npu_cycles && timer->get_npu_total_cycle_diff &&
+            timer->get_npu_active_cycle_diff)
+        {
+            unit.npuCycles = timer->get_npu_total_cycle_diff(&start, &end);
+            unit.activeNpuCycles = timer->get_npu_active_cycle_diff(&start, &end);
+        }
+
+        if (timer->cap.cpu_cycles && timer->get_cpu_cycle_diff) {
+            unit.cpuCycles = timer->get_cpu_cycle_diff(&start, &end);
+        }
+
+        if (timer->cap.duration_ms && timer->get_duration_ms) {
+            unit.time = timer->get_duration_ms(&start, &end);
+        }
+
+        this->_m_series[name].emplace_back(unit);
+    }
+
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
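Illustrative profiling sketch (not part of the patch), mirroring the pattern used by RunInference() in UseCaseCommonUtils.cc; the platform is assumed to have been initialised with hal_platform_init() beforehand, and "my_workload" is just an example name.

    #include "Profiler.hpp"
    #include "hal.h"

    #include <string>

    void TimedWork(hal_platform& platform)
    {
        arm::app::Profiler profiler{&platform, "my_workload"};

        profiler.StartProfiling();
        /* ... run the work to be measured, e.g. model.RunInference() ... */
        profiler.StopProfiling();

        const std::string results = profiler.GetResultsAndReset();
        info("%s\n", results.c_str());
    }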
diff --git a/source/application/main/UseCaseCommonUtils.cc b/source/application/main/UseCaseCommonUtils.cc
new file mode 100644
index 0000000..4ea5e4d
--- /dev/null
+++ b/source/application/main/UseCaseCommonUtils.cc
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "UseCaseCommonUtils.hpp"
+
+#include "InputFiles.hpp"
+
+namespace arm {
+namespace app {
+
+    bool RunInference(hal_platform& platform, arm::app::Model& model)
+    {
+        Profiler profiler{&platform, "Inference"};
+        profiler.StartProfiling();
+
+        bool runInf = model.RunInference();
+
+        profiler.StopProfiling();
+        std::string profileResults = profiler.GetResultsAndReset();
+        info("%s\n", profileResults.c_str());
+
+        return runInf;
+    }
+
+    int ReadUserInputAsInt(hal_platform& platform)
+    {
+        char chInput[128];
+        memset(chInput, 0, sizeof(chInput));
+
+        platform.data_acq->get_input(chInput, sizeof(chInput));
+        return atoi(chInput);
+    }
+
+    void DumpTensor(TfLiteTensor* tensor, const size_t lineBreakForNumElements)
+    {
+        char strhex[8];
+        std::string strdump;
+
+        if (!tensor) {
+            printf_err("invalid tensor\n");
+            return;
+        }
+
+        const uint32_t tensorSz = tensor->bytes;
+        const uint8_t* tensorData = tflite::GetTensorData<uint8_t>(tensor);
+
+        for (size_t i = 0; i < tensorSz; ++i) {
+            if (0 == i % lineBreakForNumElements) {
+                printf("%s\n\t", strdump.c_str());
+                strdump.clear();
+            }
+            snprintf(strhex, sizeof(strhex) - 1,
+                     "0x%02x, ", tensorData[i]);
+            strdump += std::string(strhex);
+        }
+
+        if (strdump.size()) {
+            printf("%s\n", strdump.c_str());
+        }
+    }
+
+    bool ListFilesHandler(ApplicationContext& ctx)
+    {
+        auto& model = ctx.Get<Model&>("model");
+        auto& platform = ctx.Get<hal_platform&>("platform");
+
+        constexpr uint32_t dataPsnTxtStartX = 20;
+        constexpr uint32_t dataPsnTxtStartY = 40;
+
+        if (!model.IsInited()) {
+            printf_err("Model is not initialised! Terminating processing.\n");
+            return false;
+        }
+
+        /* Clear the LCD */
+        platform.data_psn->clear(COLOR_BLACK);
+
+        /* Show the total number of embedded files. */
+        std::string strNumFiles = std::string{"Total Number of Files: "} +
+                                   std::to_string(NUMBER_OF_FILES);
+        platform.data_psn->present_data_text(strNumFiles.c_str(),
+                                             strNumFiles.size(),
+                                             dataPsnTxtStartX,
+                                             dataPsnTxtStartY,
+                                             0);
+
+#if NUMBER_OF_FILES > 0
+        constexpr uint32_t dataPsnTxtYIncr = 16;
+        info("List of Files:\n");
+        uint32_t yVal = dataPsnTxtStartY + dataPsnTxtYIncr;
+        for (uint32_t i = 0; i < NUMBER_OF_FILES; ++i, yVal += dataPsnTxtYIncr) {
+
+            std::string currentFilename{get_filename(i)};
+            platform.data_psn->present_data_text(currentFilename.c_str(),
+                                                 currentFilename.size(),
+                                                 dataPsnTxtStartX, yVal, 0);
+
+            info("\t%u => %s\n", i, currentFilename.c_str());
+        }
+#endif /* NUMBER_OF_FILES > 0 */
+
+        return true;
+    }
+
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
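Illustrative menu loop built on the helpers above (not part of the patch). It assumes ReadUserInputAsInt() and ListFilesHandler() are declared in the arm::app namespace by UseCaseCommonUtils.hpp, as their definitions above suggest, and that the context already holds the "model" and "platform" attributes that ListFilesHandler() expects.

    #include "UseCaseCommonUtils.hpp"
    #include "AppContext.hpp"
    #include "hal.h"

    void MenuLoop(arm::app::ApplicationContext& ctx, hal_platform& platform)
    {
        while (true) {
            info("Enter 1 to list the embedded files, or 0 to quit:\n");
            const int choice = arm::app::ReadUserInputAsInt(platform);

            if (0 == choice) {
                break;
            } else if (1 == choice) {
                arm::app::ListFilesHandler(ctx);
            } else {
                printf_err("Unsupported choice: %d\n", choice);
            }
        }
    }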
diff --git a/source/application/main/include/AppContext.hpp b/source/application/main/include/AppContext.hpp
new file mode 100644
index 0000000..588dfaa
--- /dev/null
+++ b/source/application/main/include/AppContext.hpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef APP_CTX_HPP
+#define APP_CTX_HPP
+
+#include <string>
+#include <map>
+
+namespace arm {
+namespace app {
+
+    class IAttribute
+    {
+    public:
+        virtual ~IAttribute() = default;
+    };
+
+    template<typename T>
+    class Attribute : public IAttribute
+    {
+    public:
+        ~Attribute() override = default;
+
+        explicit Attribute(const T value): _m_value(value){}
+
+        T Get()
+        {
+            return _m_value;
+        }
+    private:
+        T _m_value;
+    };
+
+    /* Application context class */
+    class ApplicationContext {
+    public:
+
+        /**
+         * @brief     Saves given value as a named attribute in the context.
+         * @tparam    T value type.
+         * @param[in] name     Context attribute name.
+         * @param[in] object   Value to save in the context.
+         */
+        template<typename T>
+        void Set(const std::string &name, T object)
+        {
+            this->_m_attributes[name] = new Attribute<T>(object);
+        }
+
+        /**
+         * @brief      Gets the saved attribute from the context by the given name.
+         * @tparam     T value type.
+         * @param[in]  name   Context attribute name.
+         * @return     Value saved in the context.
+         */
+        template<typename T>
+        T Get(const std::string &name)
+        {
+            auto a = static_cast<Attribute<T>*>(_m_attributes[name]);
+            return a->Get();
+        }
+
+        /**
+         * @brief      Checks if an attribute for a given name exists in the context.
+         * @param[in]  name   Attribute name.
+         * @return     true if attribute exists, false otherwise
+         */
+        bool Has(const std::string& name)
+        {
+            return _m_attributes.find(name) != _m_attributes.end();
+        }
+
+        ApplicationContext() = default;
+
+        ~ApplicationContext() {
+            for (auto& attribute : _m_attributes)
+                delete attribute.second;
+
+            this->_m_attributes.clear();
+        }
+    private:
+        std::map<std::string, IAttribute*> _m_attributes;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* APP_CTX_HPP */
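Illustrative use of the context class above (not part of the patch). Get<T>() must be called with exactly the same T that was used in Set<T>(), because the stored attribute is cast back by type; the attribute names here are examples only.

    #include "AppContext.hpp"

    #include <cstdint>
    #include <string>

    void ContextExample()
    {
        arm::app::ApplicationContext ctx;

        ctx.Set<uint32_t>("clipIndex", 0);
        ctx.Set<std::string>("useCase", "kws");

        if (ctx.Has("clipIndex")) {
            const uint32_t clipIndex = ctx.Get<uint32_t>("clipIndex");
            const std::string useCase = ctx.Get<std::string>("useCase");
            (void)clipIndex;
            (void)useCase;
        }
    }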
diff --git a/source/application/main/include/AudioUtils.hpp b/source/application/main/include/AudioUtils.hpp
new file mode 100644
index 0000000..cba981d
--- /dev/null
+++ b/source/application/main/include/AudioUtils.hpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef AUDIO_UTILS_HPP
+#define AUDIO_UTILS_HPP
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    template<class T>
+    class SlidingWindow {
+    public:
+
+        /**
+         * @brief     Creates the window slider through the given data.
+         *
+         * @param[in] data         Pointer to the data to slide through.
+         * @param[in] dataSize     Total data size, in number of T elements.
+         * @param[in] windowSize   Sliding window size, in number of T elements.
+         * @param[in] stride       Stride size, in number of T elements.
+         */
+        SlidingWindow(T *data, size_t dataSize,
+                      size_t windowSize, size_t stride) {
+            m_start = data;
+            m_dataSize = dataSize;
+            m_size = windowSize;
+            m_stride = stride;
+        }
+
+        SlidingWindow() = default;
+
+        ~SlidingWindow() = default;
+
+        /**
+         * @brief  Get the next data window.
+         * @return Pointer to the next window, if next window is not available nullptr is returned.
+         */
+        virtual T *Next() {
+            if (HasNext()) {
+                m_count++;
+                return m_start + Index() * m_stride;
+            } else {
+                return nullptr;
+            }
+        }
+
+        /**
+         * @brief  Checks if the next data portion is available.
+         * @return true if next data portion is available.
+         */
+        virtual bool HasNext() {
+            return m_size + m_count * m_stride <= m_dataSize;
+        }
+
+        /**
+         * @brief Reset the slider to the initial position.
+         */
+        virtual void Reset() {
+            m_count = 0;
+        }
+
+        /**
+         * @brief     Resets the slider to the start of the new data.
+         *            New data size MUST be the same as the old one.
+         * @param[in] newStart   Pointer to the new data to slide through.
+         */
+        virtual void Reset(T *newStart) {
+            m_start = newStart;
+            Reset();
+        }
+
+        /**
+         * @brief  Gets current index of the sliding window.
+         * @return Current position of the sliding window in number of strides.
+         */
+        size_t Index() {
+            return m_count == 0? 0: m_count - 1;
+        }
+
+        /**
+         * @brief  Gets the index from the start of the data where the next window will begin.
+         *         While Index() returns the index of sliding window itself this function
+         *         returns the index of the data element itself.
+         * @return Index from the start of the data where the next sliding window will begin.
+         */
+        virtual uint32_t NextWindowStartIndex() {
+            return m_count == 0? 0: ((m_count) * m_stride);
+        }
+
+        /**
+         * @brief     Go to given sliding window index.
+         * @param[in] index   New position of the sliding window. If index is invalid
+         *                    (greater than possible range of strides) then next call to Next() will return nullptr.
+         */
+        void FastForward(size_t index) {
+            m_count = index;
+        }
+
+        /**
+         * @brief  Calculates whole number of times the window can stride through the given data.
+         * @return Maximum number of whole strides.
+         */
+        size_t TotalStrides() {
+            if (m_size > m_dataSize) {
+                return 0;
+            }
+            return ((m_dataSize - m_size)/m_stride);
+        }
+
+        /**
+         * @brief  Calculates number of times the window can stride through the given data.
+         *         May not be a whole number.
+         * @return Number of strides to cover all data.
+         */
+        float FractionalTotalStrides() {
+            if (this->m_dataSize < this->m_size) {
+                return 0;
+            } else {
+                return ((this->m_dataSize - this->m_size)/ static_cast<float>(this->m_stride));
+            }
+        }
+
+    protected:
+        T *m_start = nullptr;
+        size_t m_dataSize = 0;
+        size_t m_size = 0;
+        size_t m_stride = 0;
+        size_t m_count = 0;
+    };
+
+    /*
+     * Sliding window for ASR will cover the whole of the input, even if
+     * this means the last window is not a full window length.
+     */
+    template<class T>
+    class ASRSlidingWindow : public SlidingWindow<T> {
+    public:
+        using SlidingWindow<T>::SlidingWindow;
+
+        /**
+         * @brief  Checks if the next data portion is available.
+         * @return true if next data portion is available.
+         */
+        bool HasNext() override {
+            return this->m_count < 1 + this->FractionalTotalStrides() && (this->NextWindowStartIndex() < this->m_dataSize);
+        }
+    };
+
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* AUDIO_UTILS_HPP */
\ No newline at end of file
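Illustrative sliding-window sketch (not part of the patch): stride a 640-sample window with a 320-sample hop over an audio clip; the window and stride values are examples only.

    #include "AudioUtils.hpp"

    #include <cstdint>
    #include <vector>

    void WindowedProcessing(const std::vector<int16_t>& audio)
    {
        arm::app::audio::SlidingWindow<const int16_t> slider(
            audio.data(), audio.size(),
            640 /* window size */, 320 /* stride */);

        while (slider.HasNext()) {
            const int16_t* window = slider.Next();
            /* ... feed the 640 samples at `window` into feature extraction ... */
            (void)window;
        }
    }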
diff --git a/source/application/main/include/ClassificationResult.hpp b/source/application/main/include/ClassificationResult.hpp
new file mode 100644
index 0000000..eae28e4
--- /dev/null
+++ b/source/application/main/include/ClassificationResult.hpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef CLASSIFICATION_RESULT_HPP
+#define CLASSIFICATION_RESULT_HPP
+
+#include <string>
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief   Class representing a single classification result.
+     */
+    class ClassificationResult {
+    public:
+        double          m_normalisedVal = 0.0;
+        std::string     m_label;
+        uint32_t        m_labelIdx = 0;
+
+        ClassificationResult() = default;
+        ~ClassificationResult() = default;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* CLASSIFICATION_RESULT_HPP */
\ No newline at end of file
diff --git a/source/application/main/include/Classifier.hpp b/source/application/main/include/Classifier.hpp
new file mode 100644
index 0000000..510e6f9
--- /dev/null
+++ b/source/application/main/include/Classifier.hpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef CLASSIFIER_HPP
+#define CLASSIFIER_HPP
+
+#include "ClassificationResult.hpp"
+#include "TensorFlowLiteMicro.hpp"
+
+#include <vector>
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief   Classifier - a helper class to get certain number of top
+     *          results from the output vector from a classification NN.
+     **/
+    class Classifier{
+    public:
+        /** @brief Constructor. */
+        Classifier() = default;
+
+        /**
+         * @brief       Gets the top N classification results from the
+         *              output vector.
+         * @param[in]   outputTensor   Inference output tensor from an NN model.
+         * @param[out]  vecResults     A vector of classification results.
+         *                             populated by this function.
+         * @param[in]   labels         Labels vector to match classified classes.
+         * @param[in]   topNCount      Number of top classifications to pick.
+         * @return      true if successful, false otherwise.
+         **/
+        virtual bool GetClassificationResults(
+            TfLiteTensor* outputTensor,
+            std::vector<ClassificationResult>& vecResults,
+            const std::vector <std::string>& labels, uint32_t topNCount);
+
+    private:
+        /**
+         * @brief       Utility function that gets the top N classification results from the
+         *              output vector.
+         * @tparam T value type
+         * @param[in]   tensor       Inference output tensor from an NN model.
+         * @param[out]  vecResults   A vector of classification results
+         *                           populated by this function.
+         * @param[in]   topNCount    Number of top classifications to pick.
+         * @param[in]   labels       Labels vector to match classified classes.
+         * @return      true if successful, false otherwise.
+         **/
+        template<typename T>
+        bool _GetTopNResults(TfLiteTensor* tensor,
+                            std::vector<ClassificationResult>& vecResults,
+                            uint32_t topNCount,
+                            const std::vector <std::string>& labels);
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* CLASSIFIER_HPP */
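A minimal sketch of the top-N selection the Classifier performs, written against plain vectors rather than a TfLiteTensor; the scores and labels below are made up for illustration:

#include <algorithm>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

/* Illustrative top-N selection over de-quantised scores; the Classifier
 * above applies the same idea to the model's output tensor. */
static std::vector<std::pair<float, std::string>> TopN(
    const std::vector<float>& scores,
    const std::vector<std::string>& labels,
    size_t topNCount)
{
    std::vector<std::pair<float, std::string>> results;
    for (size_t i = 0; i < scores.size() && i < labels.size(); ++i) {
        results.emplace_back(scores[i], labels[i]);
    }
    topNCount = std::min(topNCount, results.size());
    std::partial_sort(results.begin(), results.begin() + topNCount, results.end(),
                      [](const std::pair<float, std::string>& a,
                         const std::pair<float, std::string>& b) {
                          return a.first > b.first;
                      });
    results.resize(topNCount);
    return results;
}

int main()
{
    const std::vector<float> scores{0.1f, 0.7f, 0.2f};
    const std::vector<std::string> labels{"cat", "dog", "bird"};
    for (const auto& result : TopN(scores, labels, 2)) {
        std::printf("%s: %.2f\n", result.second.c_str(), result.first);
    }
    return 0;
}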
diff --git a/source/application/main/include/DataStructures.hpp b/source/application/main/include/DataStructures.hpp
new file mode 100644
index 0000000..5cc8b5e
--- /dev/null
+++ b/source/application/main/include/DataStructures.hpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef DATA_STRUCTURES_HPP
+#define DATA_STRUCTURES_HPP
+
+#include "hal.h"
+
+#include <iterator>
+
+namespace arm {
+namespace app {
+
+    /**
+     * Class Array2d is a data structure that represents a two dimensional array.
+     * The data is allocated in contiguous memory, arranged row-wise
+     * and individual elements can be accessed with the () operator.
+     * For example, a two dimensional array D of size (M, N) can be accessed:
+     *
+     *               _|<------------- col size = N  -------->|
+     *               |  D(r=0, c=0) D(r=0, c=1)... D(r=0, c=N-1)
+     *               |  D(r=1, c=0) D(r=1, c=1)... D(r=1, c=N-1)
+     *               |  ...
+     *    row size = M  ...
+     *               |  ...
+     *               _  D(r=M-1, c=0) D(r=M-1, c=1)... D(r=M-1, c=N-1)
+     *
+     */
+    template<typename T>
+    class Array2d {
+    public:
+        /**
+         * @brief     Creates the array2d with the given sizes.
+         * @param[in] rows   Number of rows.
+         * @param[in] cols   Number of columns.
+         */
+        Array2d(unsigned rows, unsigned cols)
+        {
+            if (rows == 0 || cols == 0) {
+                printf_err("Array2d constructor called with zero size.\n");
+                _m_rows = 0;
+                _m_cols = 0;
+                _m_data = nullptr;
+                return;
+            }
+            _m_rows = rows;
+            _m_cols = cols;
+            _m_data = new T[rows * cols];
+        }
+
+        ~Array2d()
+        {
+            delete[] _m_data;
+        }
+
+        T& operator() (unsigned int row, unsigned int col)
+        {
+#if defined(DEBUG)
+            if (row >= _m_rows || col >= _m_cols ||  _m_data == nullptr) {
+                printf_err("Array2d subscript out of bounds.\n");
+            }
+#endif /* defined(DEBUG) */
+            return _m_data[_m_cols * row + col];
+        }
+
+        T operator() (unsigned int row, unsigned int col) const
+        {
+#if defined(DEBUG)
+            if (row >= _m_rows || col >= _m_cols ||  _m_data == nullptr) {
+                printf_err("const Array2d subscript out of bounds.\n");
+            }
+#endif /* defined(DEBUG) */
+            return _m_data[_m_cols * row + col];
+        }
+
+        /**
+         * @brief     Gets the size of the given dimension.
+         * @param[in] dim   Dimension index: 0 for rows, 1 for columns.
+         * @return    Size of that dimension, or 0 for an invalid index.
+         */
+        size_t size(size_t dim)
+        {
+            switch (dim)
+            {
+                case 0:
+                    return _m_rows;
+                case 1:
+                    return _m_cols;
+                default:
+                    return 0;
+            }
+        }
+
+        /**
+         * @brief Gets the array2d total size.
+         */
+        size_t totalSize()
+        {
+            return _m_rows * _m_cols;
+        }
+
+        /**
+         * array2d iterator.
+         */
+        using iterator = T*;
+        using const_iterator = T const*;
+
+        iterator begin() { return _m_data; }
+        iterator end() { return _m_data + totalSize(); }
+        const_iterator begin() const { return _m_data; }
+        const_iterator end() const { return _m_data + totalSize(); }
+
+    private:
+        size_t _m_rows;
+        size_t _m_cols;
+        T* _m_data;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* DATA_STRUCTURES_HPP */
\ No newline at end of file
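A short usage sketch for Array2d, assuming the header and its hal dependency are on the include path:

#include "DataStructures.hpp"   /* arm::app::Array2d */

/* Fill a 2x3 array row-wise via operator() and walk it with the
 * contiguous iterators. */
void Array2dExample()
{
    arm::app::Array2d<float> arr(2, 3);              /* rows, cols */

    for (size_t r = 0; r < arr.size(0); ++r) {
        for (size_t c = 0; c < arr.size(1); ++c) {
            arr(r, c) = static_cast<float>(r * arr.size(1) + c);
        }
    }

    float sum = 0.f;
    for (float v : arr) {                            /* row-major order */
        sum += v;
    }
    /* sum == 0 + 1 + 2 + 3 + 4 + 5 == 15 */
    (void)sum;
}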
diff --git a/source/application/main/include/Mfcc.hpp b/source/application/main/include/Mfcc.hpp
new file mode 100644
index 0000000..6379fab
--- /dev/null
+++ b/source/application/main/include/Mfcc.hpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MFCC_HPP
+#define MFCC_HPP
+
+#include "PlatformMath.hpp"
+
+#include <vector>
+#include <cstdint>
+#include <cmath>
+#include <limits>
+#include <string>
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    /* MFCC's consolidated parameters. */
+    class MfccParams {
+    public:
+        float       m_samplingFreq;
+        uint32_t    m_numFbankBins;
+        float       m_melLoFreq;
+        float       m_melHiFreq;
+        uint32_t    m_numMfccFeatures;
+        uint32_t    m_frameLen;
+        uint32_t    m_frameLenPadded;
+        bool        m_useHtkMethod;
+
+        /** @brief  Constructor */
+        MfccParams(float samplingFreq, uint32_t numFbankBins,
+                   float melLoFreq, float melHiFreq,
+                   uint32_t numMfccFeats, uint32_t frameLen,
+                   bool useHtkMethod);
+
+        MfccParams()  = delete;
+
+        ~MfccParams() = default;
+
+        /** @brief  String representation of parameters */
+        std::string Str();
+    };
+
+    /**
+     * @brief   Class for MFCC feature extraction.
+     *          Based on https://github.com/ARM-software/ML-KWS-for-MCU/blob/master/Deployment/Source/MFCC/mfcc.cpp
+     *          This class is designed to be generic and self-sufficient but
+     *          certain calculation routines can be overridden to accommodate
+     *          use-case specific requirements.
+     */
+    class MFCC {
+    public:
+        /**
+         * @brief       Constructor
+         * @param[in]   params   MFCC parameters
+        */
+        explicit MFCC(const MfccParams& params);
+
+        MFCC() = delete;
+
+        ~MFCC() = default;
+
+        /**
+        * @brief        Extract MFCC features for a single small frame of
+        *               audio data, e.g. 640 samples.
+        * @param[in]    audioData   Vector of audio samples to calculate
+        *                           features for.
+        * @return       Vector of extracted MFCC features.
+        **/
+        std::vector<float> MfccCompute(const std::vector<int16_t>& audioData);
+
+        /** @brief  Initialise. */
+        void Init();
+
+       /**
+        * @brief        Extract and quantise MFCC features for a single small
+        *               frame of audio data, e.g. 640 samples.
+        * @param[in]    audioData     Vector of audio samples to calculate
+        *                             features for.
+        * @param[in]    quantScale    Quantisation scale.
+        * @param[in]    quantOffset   Quantisation offset.
+        * @return       Vector of extracted quantised MFCC features.
+        **/
+        template<typename T>
+        std::vector<T> MfccComputeQuant(const std::vector<int16_t>& audioData,
+                                        const float quantScale,
+                                        const int quantOffset)
+        {
+            this->_MfccComputePreFeature(audioData);
+            float minVal = std::numeric_limits<T>::min();
+            float maxVal = std::numeric_limits<T>::max();
+
+            std::vector<T> mfccOut(this->_m_params.m_numMfccFeatures);
+            const size_t numFbankBins = this->_m_params.m_numFbankBins;
+
+            /* Take DCT. Uses matrix mul. */
+            for (size_t i = 0, j = 0; i < mfccOut.size(); ++i, j += numFbankBins) {
+                float sum = 0;
+                for (size_t k = 0; k < numFbankBins; ++k) {
+                    sum += this->_m_dctMatrix[j + k] * this->_m_melEnergies[k];
+                }
+                /* Quantize to T. */
+                sum = std::round((sum / quantScale) + quantOffset);
+                mfccOut[i] = static_cast<T>(std::min<float>(std::max<float>(sum, minVal), maxVal));
+            }
+
+            return mfccOut;
+        }
+
+        /* Constants */
+        static constexpr float ms_logStep = /*logf(6.4)*/ 1.8562979903656 / 27.0;
+        static constexpr float ms_freqStep = 200.0 / 3;
+        static constexpr float ms_minLogHz = 1000.0;
+        static constexpr float ms_minLogMel = ms_minLogHz / ms_freqStep;
+
+    protected:
+        /**
+         * @brief       Project input frequency to Mel Scale.
+         * @param[in]   freq           Input frequency in floating point.
+         * @param[in]   useHTKMethod   bool to signal if the HTK method is to
+         *                             be used for the calculation.
+         * @return      Mel transformed frequency in floating point.
+         **/
+        static float MelScale(float freq,
+                              bool  useHTKMethod = true);
+
+        /**
+         * @brief       Inverse Mel transform - convert MEL warped frequency
+         *              back to normal frequency.
+         * @param[in]   melFreq        Mel frequency in floating point.
+         * @param[in]   useHTKMethod   bool to signal if the HTK method is to
+         *                             be used for the calculation.
+         * @return      Real world frequency in floating point.
+         **/
+        static float InverseMelScale(float melFreq,
+                                     bool  useHTKMethod = true);
+
+        /**
+         * @brief       Populates MEL energies after applying the MEL filter
+         *              bank weights and adding them up to be placed into
+         *              bins, according to the filter bank's first and last
+         *              indices (pre-computed for each filter bank element
+         *              by _CreateMelFilterBank function).
+         * @param[in]   fftVec                  Vector populated with FFT magnitudes.
+         * @param[in]   melFilterBank           2D Vector with filter bank weights.
+         * @param[in]   filterBankFilterFirst   Vector containing the first indices of filter bank
+         *                                      to be used for each bin.
+         * @param[in]   filterBankFilterLast    Vector containing the last indices of filter bank
+         *                                      to be used for each bin.
+         * @param[out]  melEnergies             Pre-allocated vector of MEL energies to be
+         *                                      populated.
+         * @return      true if successful, false otherwise.
+         */
+        virtual bool ApplyMelFilterBank(
+            std::vector<float>&                 fftVec,
+            std::vector<std::vector<float>>&    melFilterBank,
+            std::vector<int32_t>&               filterBankFilterFirst,
+            std::vector<int32_t>&               filterBankFilterLast,
+            std::vector<float>&                 melEnergies);
+
+        /**
+         * @brief           Converts the Mel energies for logarithmic scale.
+         * @param[in,out]   melEnergies   1D vector of Mel energies.
+         **/
+        virtual void ConvertToLogarithmicScale(std::vector<float>& melEnergies);
+
+        /**
+         * @brief       Create a matrix used to calculate Discrete Cosine
+         *              Transform.
+         * @param[in]   inputLength        Input length of the buffer on which
+         *                                 DCT will be performed.
+         * @param[in]   coefficientCount   Total coefficients per input length.
+         * @return      1D vector with inputLength x coefficientCount elements
+         *              populated with DCT coefficients.
+         */
+        virtual std::vector<float> CreateDCTMatrix(
+                                    int32_t inputLength,
+                                    int32_t coefficientCount);
+
+        /**
+         * @brief       Given the low and high Mel values, get the normaliser
+         *              for weights to be applied when populating the filter
+         *              bank.
+         * @param[in]   leftMel        Low Mel frequency value.
+         * @param[in]   rightMel       High Mel frequency value.
+         * @param[in]   useHTKMethod   bool to signal if HTK method is to be
+         *                             used for calculation.
+         * @return      Value to use for normalising.
+         */
+        virtual float GetMelFilterBankNormaliser(
+                        const float&   leftMel,
+                        const float&   rightMel,
+                        bool     useHTKMethod);
+
+    private:
+        MfccParams                      _m_params;
+        std::vector<float>              _m_frame;
+        std::vector<float>              _m_buffer;
+        std::vector<float>              _m_melEnergies;
+        std::vector<float>              _m_windowFunc;
+        std::vector<std::vector<float>> _m_melFilterBank;
+        std::vector<float>              _m_dctMatrix;
+        std::vector<int32_t>            _m_filterBankFilterFirst;
+        std::vector<int32_t>            _m_filterBankFilterLast;
+        bool                            _m_filterBankInitialised;
+        arm::app::math::FftInstance     _m_fftInstance;
+
+        /**
+         * @brief       Initialises the filter banks and the DCT matrix. **/
+        void _InitMelFilterBank();
+
+        /**
+         * @brief       Signals whether the instance of MFCC has had its
+         *              required buffers initialised.
+         * @return      true if initialised, false otherwise.
+         **/
+        bool _IsMelFilterBankInited();
+
+        /**
+         * @brief       Create mel filter banks for MFCC calculation.
+         * @return      2D vector of floats.
+         **/
+        std::vector<std::vector<float>> _CreateMelFilterBank();
+
+        /**
+         * @brief       Computes and populates internal member buffers used
+         *              in MFCC feature calculation.
+         * @param[in]   audioData   1D vector of 16-bit audio data.
+         */
+        void _MfccComputePreFeature(const std::vector<int16_t>& audioData);
+
+        /** @brief       Computes the magnitude from an interleaved complex array. */
+        void _ConvertToPowerSpectrum();
+
+    };
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* MFCC_HPP */
\ No newline at end of file
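A usage sketch for the MFCC class; the parameter values below are typical keyword-spotting settings chosen for illustration, and the quantisation scale/offset would normally come from the model's input tensor:

#include "Mfcc.hpp"

#include <cstdint>
#include <vector>

void MfccExample(const std::vector<int16_t>& frame /* e.g. 640 samples */)
{
    arm::app::audio::MfccParams params(
        16000.0f,   /* sampling frequency         */
        40,         /* number of Mel filter banks */
        20.0f,      /* Mel low frequency          */
        4000.0f,    /* Mel high frequency         */
        10,         /* number of MFCC features    */
        640,        /* frame length in samples    */
        true);      /* use the HTK method         */

    arm::app::audio::MFCC mfcc(params);
    mfcc.Init();

    /* Floating point features for one frame... */
    std::vector<float> feats = mfcc.MfccCompute(frame);

    /* ...or quantised features for an int8 model input (scale and
     * offset here are placeholders for the tensor's quant params). */
    std::vector<int8_t> featsQuant =
        mfcc.MfccComputeQuant<int8_t>(frame, 0.2f, -83);

    (void)feats;
    (void)featsQuant;
}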
diff --git a/source/application/main/include/PlatformMath.hpp b/source/application/main/include/PlatformMath.hpp
new file mode 100644
index 0000000..45e6a9e
--- /dev/null
+++ b/source/application/main/include/PlatformMath.hpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef PLATFORM_MATH_HPP
+#define PLATFORM_MATH_HPP
+
+#include "hal.h"
+
+/* See if ARM DSP functions can be used. */
+#if PLATFORM_HAL == PLATFORM_CORTEX_M_BAREMETAL
+    #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U)
+
+        #define ARM_DSP_AVAILABLE   (1U)
+        #include "arm_math.h"
+        #define M_PI    (PI)
+
+    #endif /* defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) */
+#endif /* PLATFORM_HAL == PLATFORM_CORTEX_M_BAREMETAL */
+
+#include <vector>
+
+namespace arm {
+namespace app {
+namespace math {
+
+    struct FftInstance {
+#if ARM_DSP_AVAILABLE
+        arm_rfft_fast_instance_f32 instance;
+#endif
+        bool initialised = false;
+    };
+
+    /* Class to provide Math functions like FFT, mean, stddev etc.
+     * This will allow other classes, functions to be independent of
+     * #if definition checks and provide a cleaner API. Also, it will
+     * consolidate all arm math functions used in one place and make
+     * them easier to test. */
+    class MathUtils {
+
+    public:
+        /**
+         * @brief       Get the cosine value of the argument in floating point.
+         * @param[in]   radians   Angle in radians.
+         * @return      Cosine value (floating point).
+         */
+        static float CosineF32(float radians);
+
+        /**
+         * @brief       Get the square root of the argument in floating point.
+         * @param[in]   input   Value to compute square root of.
+         * @return      Square root (floating point) value.
+         */
+        static float SqrtF32(float input);
+
+        /**
+         * @brief       Gets the mean of a floating point array of elements.
+         * @param[in]   ptrSrc   Pointer to the first element.
+         * @param[in]   srcLen   Number of elements in the array/vector.
+         * @return      Average value.
+         */
+        static float MeanF32(float* ptrSrc, uint32_t srcLen);
+
+        /**
+         * @brief       Gets the standard deviation of a floating point array
+         *              of elements.
+         * @param[in]   ptrSrc   Pointer to the first element.
+         * @param[in]   srcLen   Number of elements in the array/vector.
+         * @param[in]   mean     Pre-computed mean value.
+         * @return      Standard deviation value.
+         */
+        static float StdDevF32(float* ptrSrc, uint32_t srcLen,
+                               float mean);
+
+        /**
+         * @brief       Initialises the internal FFT structures (if available
+         *              for the platform). This function should be called
+         *              prior to calling FftF32 if built with Arm DSP functions.
+         * @param[in]   fftLen        Requested length of the FFT.
+         * @param[in]   fftInstance   FFT instance struct to use.
+         * @return      true if successful, false otherwise.
+         */
+        static bool FftInitF32(const uint16_t fftLen, arm::app::math::FftInstance& fftInstance);
+
+        /**
+         * @brief       Computes the FFT for the input vector.
+         * @param[in]   input       Floating point vector of input elements
+         * @param[out]  fftOutput   Output buffer to be populated by computed FFTs.
+         * @param[in]   fftInstance FFT instance struct to use.
+         */
+        static void FftF32(std::vector<float>& input,
+                           std::vector<float>& fftOutput,
+                           arm::app::math::FftInstance& fftInstance);
+
+        /**
+         * @brief       Computes the natural logarithm of each element of the
+         *              input floating point vector.
+         * @param[in]   input    Floating point input vector.
+         * @param[out]  output   Pre-allocated buffer to be populated with
+         *                       natural log values of each input element.
+         */
+        static void VecLogarithmF32(std::vector <float>& input,
+                                    std::vector <float>& output);
+
+        /**
+         * @brief       Computes the dot product of two 1D floating point
+         *              vectors.
+         *              result = sum(srcA[0]*srcB[0] + srcA[1]*srcB[1] + ..)
+         * @param[in]   srcPtrA   Pointer to the first element of first
+         *                        array.
+         * @param[in]   srcPtrB   Pointer to the first element of second
+         *                        array.
+         * @param[in]   srcLen    Number of elements in the array/vector.
+         * @return      Dot product.
+         */
+        static float DotProductF32(float* srcPtrA, float* srcPtrB,
+                                   const uint32_t srcLen);
+
+        /**
+         * @brief       Computes the squared magnitude of floating point
+         *              complex number array.
+         * @param[in]   ptrSrc   Pointer to the first element of input
+         *                       array.
+         * @param[in]   srcLen   Number of elements in the array/vector.
+         * @param[out]  ptrDst   Output buffer to be populated.
+         * @param[in]   dstLen   Output buffer len (for sanity check only).
+         * @return      true if successful, false otherwise.
+         */
+        static bool ComplexMagnitudeSquaredF32(float* ptrSrc,
+                                               const uint32_t srcLen,
+                                               float* ptrDst,
+                                               const uint32_t dstLen);
+
+    };
+} /* namespace math */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* PLATFORM_MATH_HPP */
\ No newline at end of file
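A brief sketch of how the MathUtils wrappers are intended to be used; the output buffer sizing follows the packed real-FFT convention and is an assumption here:

#include "PlatformMath.hpp"

#include <vector>

void MathExample(std::vector<float>& samples /* e.g. 1024 elements */)
{
    /* FFT: initialise once for a given length, then transform. */
    arm::app::math::FftInstance fftInstance;
    arm::app::math::MathUtils::FftInitF32(1024, fftInstance);

    std::vector<float> fftOut(samples.size());   /* packed complex output */
    arm::app::math::MathUtils::FftF32(samples, fftOut, fftInstance);

    /* Basic statistics over the same buffer. */
    const float mean = arm::app::math::MathUtils::MeanF32(
        samples.data(), static_cast<uint32_t>(samples.size()));
    const float stdDev = arm::app::math::MathUtils::StdDevF32(
        samples.data(), static_cast<uint32_t>(samples.size()), mean);

    (void)fftOut;
    (void)stdDev;
}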
diff --git a/source/application/main/include/Profiler.hpp b/source/application/main/include/Profiler.hpp
new file mode 100644
index 0000000..b16a63b
--- /dev/null
+++ b/source/application/main/include/Profiler.hpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef APP_PROFILER_HPP
+#define APP_PROFILER_HPP
+
+#include "hal.h"
+
+#include <string>
+#include <map>
+#include <vector>
+
+namespace arm {
+namespace app {
+
+    /** A single profiling unit definition. */
+    struct ProfilingUnit {
+        uint64_t npuCycles = 0;
+        uint64_t activeNpuCycles = 0;
+        uint64_t cpuCycles = 0;
+        time_t time = 0;
+    };
+
+    /* A collection of profiling units. */
+    using ProfilingSeries = std::vector<arm::app::ProfilingUnit>;
+
+    /* A map for string identifiable profiling series. */
+    using ProfilingMap = std::map<std::string, ProfilingSeries>;
+
+    /**
+     * @brief   A very simple profiler example using the platform timer
+     *          implementation.
+     */
+    class Profiler {
+    public:
+        /**
+         * @brief       Constructor for profiler.
+         * @param[in]   platform   Pointer to a valid, initialised hal platform.
+         * @param[in]   name       A friendly name for this profiler.
+         **/
+        Profiler(hal_platform* platform, const char* name);
+
+        /** Block the default constructor. */
+        Profiler() = delete;
+
+        /** Default destructor. */
+        ~Profiler() = default;
+
+        /** @brief  Start profiling => get starting time-stamp. */
+        bool StartProfiling(const char* name = nullptr);
+
+        /** @brief  Stop profiling => get the ending time-stamp. */
+        bool StopProfiling();
+
+        /** @brief  Stops the profiling and internally resets the
+         *          platform timers. */
+        bool StopProfilingAndReset();
+
+        /** @brief  Reset the platform timers. */
+        void Reset();
+
+        /**
+         * @brief   Gets the results as string and resets the profiler.
+         * @returns Result string.
+         **/
+        std::string GetResultsAndReset();
+
+        /** @brief Set the profiler name. */
+        void SetName(const char* str);
+
+    private:
+        ProfilingMap    _m_series;                /* Profiling series map. */
+        time_counter    _m_tstampSt;              /* Container for a current starting timestamp. */
+        time_counter    _m_tstampEnd;             /* Container for a current ending timestamp. */
+        hal_platform *  _m_pPlatform = nullptr;   /* Platform pointer - to get the timer. */
+
+        bool            _m_started = false;       /* Indicates profiler has been started. */
+
+        std::string     _m_name;                  /* Name given to this profiler. */
+
+        /**
+         * @brief       Appends the profiling unit computed by the "start" and
+         *              "end" timestamps to the profiling series identified by
+         *              the name provided.
+         * @param[in]   start   Starting time-stamp.
+         * @param[in]   end     Ending time-stamp.
+         * @param[in]   name    Name for the profiling unit series to be
+         *                      appended to.
+         **/
+        void _AddProfilingUnit(time_counter start, time_counter end,
+                               const std::string& name);
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* APP_PROFILER_HPP */
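A minimal sketch of wrapping a code region with the Profiler; `platform` is assumed to be an initialised hal platform and `model` an initialised arm::app::Model subclass instance:

#include "Model.hpp"
#include "Profiler.hpp"

void ProfiledInference(hal_platform& platform, arm::app::Model& model)
{
    arm::app::Profiler profiler(&platform, "inference");

    profiler.StartProfiling("Invoke");
    model.RunInference();
    profiler.StopProfiling();

    /* Prints the collected cycle counts and time, then resets. */
    info("%s\n", profiler.GetResultsAndReset().c_str());
}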
diff --git a/source/application/main/include/UseCaseCommonUtils.hpp b/source/application/main/include/UseCaseCommonUtils.hpp
new file mode 100644
index 0000000..02200e8
--- /dev/null
+++ b/source/application/main/include/UseCaseCommonUtils.hpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef USECASE_COMMON_UTILS_HPP
+#define USECASE_COMMON_UTILS_HPP
+
+#include "hal.h"
+#include "Model.hpp"
+#include "AppContext.hpp"
+#include "Profiler.hpp"
+
+/* Helper macro to convert RGB888 to RGB565 format. */
+#define RGB888_TO_RGB565(R8,G8,B8)  ((((R8>>3) & 0x1F) << 11) |     \
+                                     (((G8>>2) & 0x3F) << 5)  |     \
+                                     ((B8>>3) & 0x1F))
+
+constexpr uint16_t COLOR_BLACK  = 0;
+constexpr uint16_t COLOR_GREEN  = RGB888_TO_RGB565(  0, 255,  0); // 2016;
+constexpr uint16_t COLOR_YELLOW = RGB888_TO_RGB565(255, 255,  0); // 65504;
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief           Run inference using the given model
+     *                  object. If profiling is enabled, it will log the
+     *                  statistics too.
+     * @param[in]       platform   Reference to the hal platform object.
+     * @param[in]       model      Reference to the initialised model.
+     * @return          true if inference succeeds, false otherwise.
+     **/
+    bool RunInference(hal_platform& platform, arm::app::Model& model);
+
+    /**
+     * @brief           Reads user input and returns it as an integer.
+     * @param[in]       platform   Reference to the hal platform object.
+     * @return          Integer value corresponding to the user input.
+     **/
+    int ReadUserInputAsInt(hal_platform& platform);
+
+#if VERIFY_TEST_OUTPUT
+    /**
+     * @brief       Helper function to dump a tensor to stdout.
+     * @param[in]   tensor                    Tensor to be dumped.
+     * @param[in]   lineBreakForNumElements   Number of elements after which
+     *                                        a line break is added.
+     **/
+    void DumpTensor(TfLiteTensor* tensor,
+                    const size_t lineBreakForNumElements = 16);
+#endif /* VERIFY_TEST_OUTPUT */
+
+    /**
+     * @brief       Lists the files baked into the application.
+     * @param[in]   ctx   Reference to the application context.
+     * @return      true if the event was handled, false otherwise.
+     **/
+    bool ListFilesHandler(ApplicationContext& ctx);
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* USECASE_COMMON_UTILS_HPP */
\ No newline at end of file
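The RGB888_TO_RGB565 macro simply packs the three 8-bit channels into 5-6-5 bit fields; the constants in the comments above (2016 for green, 65504 for yellow) fall out directly. A standalone check of that arithmetic:

#include <cstdint>
#include <cstdio>

/* Same packing as the RGB888_TO_RGB565 macro, written as a function. */
static constexpr uint16_t Rgb888ToRgb565(uint8_t r, uint8_t g, uint8_t b)
{
    return static_cast<uint16_t>((((r >> 3) & 0x1F) << 11) |
                                 (((g >> 2) & 0x3F) << 5)  |
                                 ((b >> 3) & 0x1F));
}

static_assert(Rgb888ToRgb565(0, 255, 0)   == 2016,  "pure green");
static_assert(Rgb888ToRgb565(255, 255, 0) == 65504, "yellow");

int main()
{
    std::printf("green = %u, yellow = %u\n",
                static_cast<unsigned>(Rgb888ToRgb565(0, 255, 0)),
                static_cast<unsigned>(Rgb888ToRgb565(255, 255, 0)));
    return 0;
}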
diff --git a/source/application/tensorflow-lite-micro/Model.cc b/source/application/tensorflow-lite-micro/Model.cc
new file mode 100644
index 0000000..0775467
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/Model.cc
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Model.hpp"
+
+#include "hal.h"
+
+#include <cstdint>
+
+/* Initialise the model */
+arm::app::Model::~Model()
+{
+    if (this->_m_pInterpreter) {
+        delete this->_m_pInterpreter;
+    }
+
+    /**
+     * No clean-up function available for allocator in TensorFlow Lite Micro yet.
+     **/
+}
+
+arm::app::Model::Model() :
+    _m_inited (false),
+    _m_type(kTfLiteNoType)
+{
+    this->_m_pErrorReporter = &this->_m_uErrorReporter;
+}
+
+bool arm::app::Model::Init(tflite::MicroAllocator* allocator)
+{
+    /* Following tf lite micro example:
+     * Map the model into a usable data structure. This doesn't involve any
+     * copying or parsing, it's a very lightweight operation. */
+    const uint8_t* model_addr = ModelPointer();
+    debug("loading model from @ 0x%p\n", model_addr);
+    this->_m_pModel = ::tflite::GetModel(model_addr);
+
+    if (this->_m_pModel->version() != TFLITE_SCHEMA_VERSION) {
+        this->_m_pErrorReporter->Report(
+            "[ERROR] model's schema version %d is not equal "
+            "to supported version %d.",
+            this->_m_pModel->version(), TFLITE_SCHEMA_VERSION);
+        return false;
+    }
+
+    /* Pull in only the operation implementations we need.
+     * This relies on a complete list of all the ops needed by this graph.
+     * An easier approach is to just use the AllOpsResolver, but this will
+     * incur some penalty in code space for op implementations that are not
+     * needed by this graph.
+     * static ::tflite::ops::micro::AllOpsResolver resolver; */
+    /* NOLINTNEXTLINE(runtime-global-variables) */
+    debug("loading op resolver\n");
+
+    this->EnlistOperations();
+
+    /* Create allocator instance, if it doesn't exist */
+    this->_m_pAllocator = allocator;
+    if (!this->_m_pAllocator) {
+        /* Create an allocator instance */
+        info("Creating allocator using tensor arena in %s\n",
+            ACTIVATION_BUF_SECTION_NAME);
+
+        this->_m_pAllocator = tflite::MicroAllocator::Create(
+                                        this->GetTensorArena(),
+                                        this->GetActivationBufferSize(),
+                                        this->_m_pErrorReporter);
+
+        if (!this->_m_pAllocator) {
+            printf_err("Failed to create allocator\n");
+            return false;
+        }
+        debug("Created new allocator @ 0x%p\n", this->_m_pAllocator);
+    } else {
+        debug("Using existing allocator @ 0x%p\n", this->_m_pAllocator);
+    }
+
+    this->_m_pInterpreter = new ::tflite::MicroInterpreter(
+        this->_m_pModel, this->GetOpResolver(),
+        this->_m_pAllocator, this->_m_pErrorReporter);
+
+    if (!this->_m_pInterpreter) {
+        printf_err("Failed to allocate interpreter\n");
+        return false;
+    }
+
+    /* Allocate memory from the tensor_arena for the model's tensors. */
+    info("Allocating tensors\n");
+    TfLiteStatus allocate_status = this->_m_pInterpreter->AllocateTensors();
+
+    if (allocate_status != kTfLiteOk) {
+        this->_m_pErrorReporter->Report("[ERROR] allocateTensors() failed");
+        printf_err("tensor allocation failed!\n");
+        delete this->_m_pInterpreter;
+        return false;
+    }
+
+    /* Get information about the memory area to use for the model's input. */
+    this->_m_input.resize(this->GetNumInputs());
+    for (size_t inIndex = 0; inIndex < this->GetNumInputs(); inIndex++)
+        this->_m_input[inIndex] = this->_m_pInterpreter->input(inIndex);
+
+    this->_m_output.resize(this->GetNumOutputs());
+    for (size_t outIndex = 0; outIndex < this->GetNumOutputs(); outIndex++)
+        this->_m_output[outIndex] = this->_m_pInterpreter->output(outIndex);
+
+    if (this->_m_input.empty() || this->_m_output.empty()) {
+        printf_err("failed to get tensors\n");
+        return false;
+    } else {
+        this->_m_type = this->_m_input[0]->type;  /* Input 0 should be the main input */
+
+        /* Clear the input & output tensors */
+        for (size_t inIndex = 0; inIndex < this->GetNumInputs(); inIndex++) {
+            std::memset(this->_m_input[inIndex]->data.data, 0, this->_m_input[inIndex]->bytes);
+        }
+        for (size_t outIndex = 0; outIndex < this->GetNumOutputs(); outIndex++) {
+            std::memset(this->_m_output[outIndex]->data.data, 0, this->_m_output[outIndex]->bytes);
+        }
+
+        this->LogInterpreterInfo();
+    }
+
+    this->_m_inited = true;
+    return true;
+}
+
+tflite::MicroAllocator* arm::app::Model::GetAllocator()
+{
+    if (this->IsInited()) {
+        return this->_m_pAllocator;
+    }
+    return nullptr;
+}
+
+void arm::app::Model::LogTensorInfo(TfLiteTensor* tensor)
+{
+    if (!tensor) {
+        printf_err("Invalid tensor\n");
+        assert(tensor);
+        return;
+    }
+
+    debug("\ttensor is assigned to 0x%p\n", tensor);
+    info("\ttensor type is %s\n", TfLiteTypeGetName(tensor->type));
+    info("\ttensor occupies %u bytes with dimensions\n",
+         (uint32_t)tensor->bytes);
+    for (int i = 0 ; i < tensor->dims->size; ++i) {
+        info ("\t\t%d: %3d\n", i, tensor->dims->data[i]);
+    }
+
+    TfLiteQuantization quant = tensor->quantization;
+    if (kTfLiteAffineQuantization == quant.type) {
+        auto* quantParams = (TfLiteAffineQuantization*)quant.params;
+        info("Quant dimension: %u\n", quantParams->quantized_dimension);
+        for (int i = 0; i < quantParams->scale->size; ++i) {
+            info("Scale[%d] = %f\n", i, quantParams->scale->data[i]);
+        }
+        for (int i = 0; i < quantParams->zero_point->size; ++i) {
+            info("ZeroPoint[%d] = %d\n", i, quantParams->zero_point->data[i]);
+        }
+    }
+}
+
+void arm::app::Model::LogInterpreterInfo()
+{
+    if (!this->_m_pInterpreter) {
+        printf_err("Invalid interpreter\n");
+        return;
+    }
+
+    info("Model INPUT tensors: \n");
+    for (auto input : this->_m_input) {
+        this->LogTensorInfo(input);
+    }
+
+    info("Model OUTPUT tensors: \n");
+    for (auto output : this->_m_output) {
+        this->LogTensorInfo(output);
+    }
+
+    info("Activation buffer (a.k.a tensor arena) size used: %zu\n",
+        this->_m_pInterpreter->arena_used_bytes());
+
+    const uint32_t nOperators = this->_m_pInterpreter->operators_size();
+    info("Number of operators: %u\n", nOperators);
+
+    /* For each operator, display registration information */
+    for (uint32_t i = 0 ; i < nOperators; ++i) {
+        const tflite::NodeAndRegistration nodeReg =
+            this->_m_pInterpreter->node_and_registration(i);
+        const TfLiteRegistration* reg = nodeReg.registration;
+        std::string opName{""};
+
+        if (reg) {
+            if (tflite::BuiltinOperator_CUSTOM == reg->builtin_code) {
+                opName = std::string(reg->custom_name);
+            } else {
+                opName = std::string(EnumNameBuiltinOperator(
+                            tflite::BuiltinOperator(reg->builtin_code)));
+            }
+        }
+        info("\tOperator %u: %s\n", i, opName.c_str());
+    }
+}
+
+bool arm::app::Model::IsInited() const
+{
+    return this->_m_inited;
+}
+
+bool arm::app::Model::IsDataSigned() const
+{
+    return this->GetType() == kTfLiteInt8;
+}
+
+bool arm::app::Model::RunInference()
+{
+    bool inference_state = false;
+    if (this->_m_pModel && this->_m_pInterpreter) {
+        if (kTfLiteOk != this->_m_pInterpreter->Invoke()) {
+            printf_err("Invoke failed.\n");
+        } else {
+            inference_state = true;
+        }
+    } else {
+        printf_err("Error: No interpreter!\n");
+    }
+    return inference_state;
+}
+
+TfLiteTensor* arm::app::Model::GetInputTensor(size_t index) const
+{
+    if (index < this->GetNumInputs()) {
+        return this->_m_input.at(index);
+    }
+    return nullptr;
+}
+
+TfLiteTensor* arm::app::Model::GetOutputTensor(size_t index) const
+{
+    if (index < this->GetNumOutputs()) {
+        return this->_m_output.at(index);
+    }
+    return nullptr;
+}
+
+size_t arm::app::Model::GetNumInputs() const
+{
+    if (this->_m_pModel && this->_m_pInterpreter) {
+        return this->_m_pInterpreter->inputs_size();
+    }
+    return 0;
+}
+
+size_t arm::app::Model::GetNumOutputs() const
+{
+    if (this->_m_pModel && this->_m_pInterpreter) {
+        return this->_m_pInterpreter->outputs_size();
+    }
+    return 0;
+}
+
+
+TfLiteType arm::app::Model::GetType() const
+{
+    return this->_m_type;
+}
+
+TfLiteIntArray* arm::app::Model::GetInputShape(size_t index) const
+{
+    if (index < this->GetNumInputs()) {
+        return this->_m_input.at(index)->dims;
+    }
+    return nullptr;
+}
+
+TfLiteIntArray* arm::app::Model::GetOutputShape(size_t index) const
+{
+    if (index < this->GetNumOutputs()) {
+        return this->_m_output.at(index)->dims;
+    }
+    return nullptr;
+}
+
+bool arm::app::Model::ShowModelInfoHandler()
+{
+    if (!this->IsInited()) {
+        printf_err("Model is not initialised! Terminating processing.\n");
+        return false;
+    }
+
+    PrintTensorFlowVersion();
+    info("Model info:\n");
+    this->LogInterpreterInfo();
+
+#if defined(ARM_NPU)
+    info("Use of Arm uNPU is enabled\n");
+#else   /* ARM_NPU */
+    info("Use of Arm uNPU is disabled\n");
+#endif  /* ARM_NPU */
+
+    return true;
+}
+namespace arm {
+namespace app {
+    static uint8_t  _tensor_arena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+} /* namespace app */
+} /* namespace arm */
+
+size_t arm::app::Model::GetActivationBufferSize()
+{
+    return ACTIVATION_BUF_SZ;
+}
+
+uint8_t *arm::app::Model::GetTensorArena()
+{
+    return _tensor_arena;
+}
\ No newline at end of file
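The typical run-time flow through this class, sketched against the public API above; `model` is assumed to be an instance of some use-case-specific subclass:

#include "Model.hpp"

bool RunOnce(arm::app::Model& model)
{
    /* Init() maps the flatbuffer, creates the allocator/interpreter and
     * allocates tensors from the arena. */
    if (!model.IsInited() && !model.Init()) {
        return false;
    }

    TfLiteTensor* input = model.GetInputTensor(0);
    if (input == nullptr) {
        return false;
    }
    /* ... fill input->data with pre-processed features here ... */

    return model.RunInference();   /* invokes the interpreter */
}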
diff --git a/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc b/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
new file mode 100644
index 0000000..ce36a8f
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "TensorFlowLiteMicro.hpp"
+
+#include "hal.h"
+
+void PrintTensorFlowVersion()
+{
+    info("uTFL version: %u.%u.%u\n", TF_MAJOR_VERSION, TF_MINOR_VERSION,
+        TF_PATCH_VERSION);
+}
+
+arm::app::QuantParams arm::app::GetTensorQuantParams(TfLiteTensor* tensor)
+{
+    arm::app::QuantParams params;
+    if (kTfLiteAffineQuantization == tensor->quantization.type) {
+        auto* quantParams = (TfLiteAffineQuantization*) (tensor->quantization.params);
+        if (quantParams && 0 == quantParams->quantized_dimension) {
+            if (quantParams->scale->size) {
+                params.scale = quantParams->scale->data[0];
+            }
+            if (quantParams->zero_point->size) {
+                params.offset = quantParams->zero_point->data[0];
+            }
+        } else if (tensor->params.scale != 0.0) {
+            /* Legacy tensorflow quantisation parameters */
+            params.scale = tensor->params.scale;
+            params.offset = tensor->params.zero_point;
+        }
+    }
+    return params;
+}
+
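A small sketch of how the returned parameters are typically used to de-quantise an int8 output element (real_value = scale * (quantised_value - offset)); the tensor argument is assumed to be valid:

#include "TensorFlowLiteMicro.hpp"

float DequantiseElement(TfLiteTensor* outputTensor, size_t index)
{
    const arm::app::QuantParams params =
        arm::app::GetTensorQuantParams(outputTensor);

    /* int8 accessor shown here; pick the accessor matching tensor->type. */
    const int8_t quantised = outputTensor->data.int8[index];

    return params.scale * (static_cast<float>(quantised) - params.offset);
}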
diff --git a/source/application/tensorflow-lite-micro/include/BufAttributes.hpp b/source/application/tensorflow-lite-micro/include/BufAttributes.hpp
new file mode 100644
index 0000000..126172b
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/include/BufAttributes.hpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BUF_ATTRIBUTES_HPP
+#define BUF_ATTRIBUTES_HPP
+
+#ifdef __has_attribute
+#define HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else   /* __has_attribute */
+#define HAVE_ATTRIBUTE(x) 0
+#endif  /* __has_attribute */
+
+#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))
+
+/* We want all buffers/sections to be aligned to 16 bytes. */
+#define ALIGNMENT_REQ               aligned(16)
+
+/* Model data section name. */
+#define MODEL_SECTION               section("nn_model")
+
+/* Label section name */
+#define LABEL_SECTION               section("labels")
+
+#ifndef ACTIVATION_BUF_SZ
+    #warning  "ACTIVATION_BUF_SZ needs to be defined. Using default value"
+    #define ACTIVATION_BUF_SZ       0x00200000
+#endif  /* ACTIVATION_BUF_SZ */
+
+#ifndef ACTIVATION_BUF_SRAM_SZ
+    #warning  "ACTIVATION_BUF_SRAM_SZ needs to be defined. Using default value = 0"
+    #define ACTIVATION_BUF_SRAM_SZ  0x00000000
+#endif /* ACTIVATION_BUF_SRAM_SZ */
+
+/**
+ * Activation buffer (a.k.a. tensor arena) section name.
+ * We have to place the tensor arena in a different region based on its size.
+ * If it fits in SRAM, we place it there, and also mark it by giving it a
+ * different section name. The scatter file places the ZI data in DDR and
+ * the uninitialised region in the SRAM.
+ **/
+#define ACTIVATION_BUF_SECTION_SRAM section(".bss.NoInit.activation_buf")
+#define ACTIVATION_BUF_SECTION_DRAM section("activation_buf")
+
+#if     ACTIVATION_BUF_SZ > ACTIVATION_BUF_SRAM_SZ /* Will buffer not fit in SRAM? */
+    #define ACTIVATION_BUF_SECTION      ACTIVATION_BUF_SECTION_DRAM
+    #define ACTIVATION_BUF_SECTION_NAME ("DDR")
+#else   /* ACTIVATION_BUF_SZ > ACTIVATION_BUF_SRAM_SZ */
+    #define ACTIVATION_BUF_SECTION  ACTIVATION_BUF_SECTION_SRAM
+    #define ACTIVATION_BUF_SECTION_NAME ("SRAM")
+#endif  /* ACTIVATION_BUF_SZ > ACTIVATION_BUF_SRAM_SZ */
+
+/* IFM section name. */
+#define IFM_BUF_SECTION             section("ifm")
+
+/* Form the attributes, alignment is mandatory. */
+#define MAKE_ATTRIBUTE(x)           __attribute__((ALIGNMENT_REQ, x))
+#define MODEL_TFLITE_ATTRIBUTE      MAKE_ATTRIBUTE(MODEL_SECTION)
+#define ACTIVATION_BUF_ATTRIBUTE    MAKE_ATTRIBUTE(ACTIVATION_BUF_SECTION)
+#define IFM_BUF_ATTRIBUTE           MAKE_ATTRIBUTE(IFM_BUF_SECTION)
+#define LABELS_ATTRIBUTE            MAKE_ATTRIBUTE(LABEL_SECTION)
+
+#else /* HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) */
+
+#define MODEL_TFLITE_ATTRIBUTE
+#define ACTIVATION_BUF_ATTRIBUTE
+#define IFM_BUF_ATTRIBUTE
+#define LABELS_ATTRIBUTE
+
+#endif /* HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) */
+
+#endif /* BUF_ATTRIBUTES_HPP */
\ No newline at end of file
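An illustrative placement sketch using the attribute macros above; the nn_model array is a hypothetical stand-in for the byte array the build normally generates from a .tflite file:

#include "BufAttributes.hpp"

#include <cstdint>

/* Tensor arena: lands in SRAM or DDR depending on ACTIVATION_BUF_SZ
 * versus ACTIVATION_BUF_SRAM_SZ (see the section selection above). */
static uint8_t tensor_arena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;

/* Hypothetical model blob placed into the "nn_model" section. */
static const uint8_t nn_model[] MODEL_TFLITE_ATTRIBUTE = { 0x00 };

uint8_t* GetArena()            { return tensor_arena; }
const uint8_t* GetModelBytes() { return nn_model; }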
diff --git a/source/application/tensorflow-lite-micro/include/Model.hpp b/source/application/tensorflow-lite-micro/include/Model.hpp
new file mode 100644
index 0000000..70cf9ca
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/include/Model.hpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MODEL_HPP
+#define MODEL_HPP
+
+#include "TensorFlowLiteMicro.hpp"
+#include "BufAttributes.hpp"
+
+#include <cstdint>
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief   NN model class wrapping the underlying TensorFlow-Lite-Micro API.
+     */
+    class Model {
+    public:
+        /** @brief Constructor. */
+        Model();
+
+        /** @brief Destructor. */
+        ~Model();
+
+        /** @brief  Gets the pointer to the model's input tensor at given input index. */
+        TfLiteTensor* GetInputTensor(size_t index) const;
+
+        /** @brief  Gets the pointer to the model's output tensor at given output index. */
+        TfLiteTensor* GetOutputTensor(size_t index) const;
+
+        /** @brief  Gets the model's data type. */
+        TfLiteType GetType() const;
+
+        /** @brief  Gets the pointer to the model's input shape. */
+        TfLiteIntArray* GetInputShape(size_t index) const;
+
+        /** @brief  Gets the pointer to the model's output shape at given output index. */
+        TfLiteIntArray* GetOutputShape(size_t index) const;
+
+        /** @brief  Gets the number of input tensors the model has. */
+        size_t GetNumInputs() const;
+
+        /** @brief  Gets the number of output tensors the model has. */
+        size_t GetNumOutputs() const;
+
+        /** @brief  Logs the tensor information to stdout. */
+        void LogTensorInfo(TfLiteTensor* tensor);
+
+        /** @brief  Logs the interpreter information to stdout. */
+        void LogInterpreterInfo();
+
+        /** @brief      Initialise the model class object.
+         *  @param[in]  allocator   Optional: a pre-initialised micro allocator pointer,
+         *                          if available. If supplied, this allocator will be used
+         *                          to create the interpreter instance.
+         *  @return     true if initialisation succeeds, false otherwise.
+        **/
+        bool Init(tflite::MicroAllocator* allocator = nullptr);
+
+        /**
+         * @brief       Gets the allocator pointer for this instance.
+         * @return      Pointer to a tflite::MicroAllocator object, if
+         *              available; nullptr otherwise.
+         **/
+        tflite::MicroAllocator* GetAllocator();
+
+        /** @brief  Checks if this object has been initialised. */
+        bool IsInited() const;
+
+        /** @brief  Checks if the model uses signed data. */
+        bool IsDataSigned() const;
+
+        /** @brief  Runs the inference (invokes the interpreter). */
+        bool RunInference();
+
+        /** @brief   Model information handler common to all models.
+         *  @return  true or false based on execution success.
+         **/
+        bool ShowModelInfoHandler();
+
+        /** @brief   Gets a pointer to the tensor arena. */
+        uint8_t* GetTensorArena();
+
+    protected:
+        /** @brief      Gets the pointer to the NN model data array.
+         *  @return     Pointer of uint8_t type.
+         **/
+        virtual const uint8_t* ModelPointer() = 0;
+
+        /** @brief      Gets the model size.
+         *  @return     size_t, size in bytes.
+         **/
+        virtual size_t ModelSize() = 0;
+
+        /**
+         * @brief       Gets the op resolver for the model instance.
+         * @return      const reference to a tflite::MicroOpResolver object.
+         **/
+        virtual const tflite::MicroOpResolver& GetOpResolver() = 0;
+
+        /**
+         * @brief       Add all the operators required for the given model.
+         *              Implementation of this should come from the use case.
+         * @return      true if ops are successfully added, false otherwise.
+         **/
+        virtual bool EnlistOperations() = 0;
+
+        /** @brief   Gets the total size of tensor arena available for use. */
+        size_t GetActivationBufferSize();
+
+    private:
+        tflite::MicroErrorReporter      _m_uErrorReporter;                     /* Error reporter object. */
+        tflite::ErrorReporter*          _m_pErrorReporter      = nullptr;      /* Pointer to the error reporter. */
+        const tflite::Model*            _m_pModel              = nullptr;      /* Tflite model pointer. */
+        tflite::MicroInterpreter*       _m_pInterpreter        = nullptr;      /* Tflite interpreter. */
+        tflite::MicroAllocator*         _m_pAllocator          = nullptr;      /* Tflite micro allocator. */
+        bool                            _m_inited              = false;        /* Indicates whether this object has been initialised. */
+
+        std::vector<TfLiteTensor*>      _m_input              = {};           /* Model's input tensor pointers. */
+        std::vector<TfLiteTensor*>      _m_output             = {};           /* Model's output tensor pointers. */
+        TfLiteType                      _m_type               = kTfLiteNoType;/* Model's data type. */
+
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* MODEL_HPP */
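A hypothetical minimal subclass, sketched only to show which hooks a use case provides; the operator set and the model-data accessor are assumptions, and real use cases generate the model array from a .tflite file at build time:

#include "Model.hpp"

class HelloWorldModel : public arm::app::Model {
protected:
    const uint8_t* ModelPointer() override { return GetModelData(); }

    size_t ModelSize() override { return 1; /* placeholder size */ }

    const tflite::MicroOpResolver& GetOpResolver() override
    {
        return this->_m_opResolver;
    }

    /* Register only the operators the graph needs (assumed set). */
    bool EnlistOperations() override
    {
        this->_m_opResolver.AddFullyConnected();
        this->_m_opResolver.AddSoftmax();
        return true;
    }

private:
    /* Placeholder for the generated model data symbol. */
    static const uint8_t* GetModelData()
    {
        static const uint8_t dummy[] = { 0x00 };
        return dummy;
    }

    tflite::MicroMutableOpResolver<2> _m_opResolver;
};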
diff --git a/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp b/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
new file mode 100644
index 0000000..677b4ba
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef TENSORFLOW_LITE_MICRO_LOCAL_HPP
+#define TENSORFLOW_LITE_MICRO_LOCAL_HPP
+
+/* We include all our TensorFlow Lite Micro headers here */
+
+/**
+ * TensorFlow Lite Micro sources can generate a lot of warnings from the usage
+ * of a single macro (TF_LITE_REMOVE_VIRTUAL_DELETE). Suppress the known ones
+ * here to prevent them from masking warnings that might be generated by our
+ * application sources.
+ */
+#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+    #pragma clang diagnostic push
+    #pragma clang diagnostic ignored "-Wunused-parameter"
+    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+    #include "tensorflow/lite/micro/micro_interpreter.h"
+    #include "tensorflow/lite/micro/micro_error_reporter.h"
+    #include "tensorflow/lite/micro/all_ops_resolver.h"
+    #pragma clang diagnostic pop
+#elif defined(__GNUC__)
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wunused-parameter"
+    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+    #include "tensorflow/lite/micro/micro_interpreter.h"
+    #include "tensorflow/lite/micro/micro_error_reporter.h"
+    #include "tensorflow/lite/micro/all_ops_resolver.h"
+    #pragma GCC diagnostic pop
+#else
+    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+    #include "tensorflow/lite/micro/micro_interpreter.h"
+    #include "tensorflow/lite/micro/micro_error_reporter.h"
+    #include "tensorflow/lite/micro/all_ops_resolver.h"
+#endif
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/micro/kernels/micro_ops.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+#include "tensorflow/lite/version.h"
+
+#if defined (TESTS)
+    #include "tensorflow/lite/micro/test_helpers.h"
+#endif /* defined (TESTS) */
+
+namespace arm {
+namespace app {
+
+    struct QuantParams {
+        float   scale   = 1.0;
+        int     offset  = 0;
+    };
+
+    QuantParams GetTensorQuantParams(TfLiteTensor* tensor);
+
+} /* namespace app */
+} /* namespace arm */
+
+/**
+ * @brief Prints the TensorFlow version in use to stdout.
+ */
+void PrintTensorFlowVersion();
+
+#endif /* TENSORFLOW_LITE_MICRO_LOCAL_HPP */
diff --git a/source/use_case/ad/include/AdMelSpectrogram.hpp b/source/use_case/ad/include/AdMelSpectrogram.hpp
new file mode 100644
index 0000000..cf8a1d4
--- /dev/null
+++ b/source/use_case/ad/include/AdMelSpectrogram.hpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ADMELSPECTROGRAM_HPP
+#define ADMELSPECTROGRAM_HPP
+
+#include "MelSpectrogram.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    /* Class to provide anomaly detection specific Mel Spectrogram calculation requirements */
+    class AdMelSpectrogram : public MelSpectrogram {
+
+    public:
+        static constexpr uint32_t  ms_defaultSamplingFreq = 16000;
+        static constexpr uint32_t  ms_defaultNumFbankBins =    64;
+        static constexpr uint32_t  ms_defaultMelLoFreq    =     0;
+        static constexpr uint32_t  ms_defaultMelHiFreq    =  8000;
+        static constexpr bool      ms_defaultUseHtkMethod = false;
+
+        explicit AdMelSpectrogram(const size_t frameLen)
+                :  MelSpectrogram(MelSpecParams(
+                ms_defaultSamplingFreq, ms_defaultNumFbankBins,
+                ms_defaultMelLoFreq, ms_defaultMelHiFreq,
+                frameLen, ms_defaultUseHtkMethod))
+        {}
+
+        AdMelSpectrogram()  = delete;
+        ~AdMelSpectrogram() = default;
+
+    protected:
+
+        /**
+         * @brief       Overrides base class implementation of this function.
+         * @param[in]   fftVec                  Vector populated with FFT magnitudes
+         * @param[in]   melFilterBank           2D Vector with filter bank weights
+         * @param[in]   filterBankFilterFirst   Vector containing the first indices of filter bank
+         *                                      to be used for each bin.
+         * @param[in]   filterBankFilterLast    Vector containing the last indices of filter bank
+         *                                      to be used for each bin.
+         * @param[out]  melEnergies             Pre-allocated vector of MEL energies to be
+         *                                      populated.
+         * @return      true if successful, false otherwise
+         */
+        virtual bool ApplyMelFilterBank(
+                std::vector<float>&                 fftVec,
+                std::vector<std::vector<float>>&    melFilterBank,
+                std::vector<int32_t>&               filterBankFilterFirst,
+                std::vector<int32_t>&               filterBankFilterLast,
+                std::vector<float>&                 melEnergies) override;
+
+        /**
+         * @brief       Overrides the base class implementation to convert mel
+         *              energies to logarithmic scale. The difference from the
+         *              default behaviour is that the power is converted to dB
+         *              and subsequently clamped.
+         * @param[in/out]   melEnergies - 1D vector of Mel energies
+         **/
+        virtual void ConvertToLogarithmicScale(std::vector<float>& melEnergies) override;
+
+        /**
+         * @brief       Given the low and high Mel values, get the normaliser
+         *              for weights to be applied when populating the filter
+         *              bank. Override for the base class implementation.
+         * @param[in]   leftMel - low Mel frequency value
+         * @param[in]   rightMel - high Mel frequency value
+         * @param[in]   useHTKMethod - bool to signal if HTK method is to be
+         *              used for calculation
+         * @return      Normaliser value to be applied when
+         *              populating the filter bank.
+         */
+        virtual float GetMelFilterBankNormaliser(
+                const float&   leftMel,
+                const float&   rightMel,
+                const bool     useHTKMethod) override;
+    };
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* ADMELSPECTROGRAM_HPP */
diff --git a/source/use_case/ad/include/AdModel.hpp b/source/use_case/ad/include/AdModel.hpp
new file mode 100644
index 0000000..2d83455
--- /dev/null
+++ b/source/use_case/ad/include/AdModel.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef AD_MODEL_HPP
+#define AD_MODEL_HPP
+
+#include "Model.hpp"
+
+extern const int g_FrameLength;
+extern const int g_FrameStride;
+extern const float g_ScoreThreshold;
+extern const float g_TrainingMean;
+
+namespace arm {
+namespace app {
+
+    class AdModel : public Model {
+    protected:
+        /** @brief   Gets the reference to op resolver interface class */
+        const tflite::MicroOpResolver& GetOpResolver() override;
+
+        /** @brief   Adds operations to the op resolver instance */
+        bool EnlistOperations() override;
+
+        const uint8_t* ModelPointer() override;
+
+        size_t ModelSize() override;
+
+    private:
+        /* Maximum number of individual operations that can be enlisted */
+        static constexpr int _ms_maxOpCnt = 6;
+
+        /* A mutable op resolver instance */
+        tflite::MicroMutableOpResolver<_ms_maxOpCnt> _m_opResolver;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* AD_MODEL_HPP */
diff --git a/source/use_case/ad/include/AdPostProcessing.hpp b/source/use_case/ad/include/AdPostProcessing.hpp
new file mode 100644
index 0000000..f3b35a1
--- /dev/null
+++ b/source/use_case/ad/include/AdPostProcessing.hpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ADPOSTPROCESSING_HPP
+#define ADPOSTPROCESSING_HPP
+
+#include "TensorFlowLiteMicro.hpp"
+
+#include <vector>
+
+namespace arm {
+namespace app {
+
+    /** @brief      Dequantize TensorFlow Lite Micro tensor.
+     *  @param[in]  tensor Pointer to the TensorFlow Lite Micro tensor to be dequantized.
+     *  @return     Vector with the dequantized tensor values.
+    **/
+    template<typename T>
+    std::vector<float> Dequantize(TfLiteTensor* tensor);
+
+    /**
+     * @brief   Calculates the softmax of a vector in place. **/
+    void Softmax(std::vector<float>& inputVector);
+
+
+    /** @brief      Given a wav file name return AD model output index.
+     *  @param[in]  wavFileName Audio WAV filename.
+     *                          File name should be in format <anything>_<goes>_XX_<here>.wav
+     *                          where XX is the machine ID e.g. 00, 02, 04 or 06
+     *  @return     AD model output index as 8 bit integer.
+    **/
+    int8_t OutputIndexFromFileName(std::string wavFileName);
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* ADPOSTPROCESSING_HPP */
diff --git a/source/use_case/ad/include/MelSpectrogram.hpp b/source/use_case/ad/include/MelSpectrogram.hpp
new file mode 100644
index 0000000..c1dd61e
--- /dev/null
+++ b/source/use_case/ad/include/MelSpectrogram.hpp
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MELSPECTROGRAM_HPP
+#define MELSPECTROGRAM_HPP
+
+#include "PlatformMath.hpp"
+
+#include <vector>
+#include <cstdint>
+#include <cmath>
+#include <limits>
+#include <string>
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    /* Mel Spectrogram consolidated parameters */
+    class MelSpecParams {
+    public:
+        float       m_samplingFreq;
+        uint32_t    m_numFbankBins;
+        float       m_melLoFreq;
+        float       m_melHiFreq;
+        uint32_t    m_frameLen;
+        uint32_t    m_frameLenPadded;
+        bool        m_useHtkMethod;
+
+        /** @brief  Constructor */
+        MelSpecParams(const float samplingFreq, const uint32_t numFbankBins,
+                      const float melLoFreq, const float melHiFreq,
+                      const uint32_t frameLen, const bool useHtkMethod);
+
+        MelSpecParams()  = delete;
+        ~MelSpecParams() = default;
+
+        /** @brief  String representation of parameters */
+        std::string Str();
+    };
+
+    /**
+     * @brief   Class for Mel Spectrogram feature extraction.
+     *          Based on https://github.com/ARM-software/ML-KWS-for-MCU/blob/master/Deployment/Source/MFCC/mfcc.cpp
+     *          This class is designed to be generic and self-sufficient but
+     *          certain calculation routines can be overridden to accommodate
+     *          use-case specific requirements.
+     */
+    class MelSpectrogram {
+
+    public:
+        /**
+        * @brief        Extract Mel Spectrogram for a single small frame of
+        *               audio data, e.g. 640 samples.
+        * @param[in]    audioData - Vector of audio samples to calculate
+        *               features for.
+        * @param[in]    trainingMean - Value to subtract from the computed mel spectrogram, default 0.
+        * @return       Vector of extracted Mel Spectrogram features.
+        **/
+        std::vector<float> ComputeMelSpec(const std::vector<int16_t>& audioData, float trainingMean = 0);
+
+        /**
+         * @brief       Constructor
+         * @param[in]   params - Mel Spectrogram parameters
+        */
+        MelSpectrogram(const MelSpecParams& params);
+
+        MelSpectrogram() = delete;
+        ~MelSpectrogram() = default;
+
+        /** @brief  Initialise */
+        void Init();
+
+        /**
+         * @brief        Extract Mel Spectrogram features and quantise them for a single
+         *               small frame of audio data, e.g. 640 samples.
+         * @param[in]    audioData - Vector of audio samples to calculate
+         *               features for.
+         * @param[in]    quantScale - Quantisation scale.
+         * @param[in]    quantOffset - Quantisation offset.
+         * @param[in]    trainingMean - Value to subtract from the computed mel spectrogram, default 0.
+         * @return       Vector of extracted quantised Mel Spectrogram features.
+         **/
+        template<typename T>
+        std::vector<T> MelSpecComputeQuant(const std::vector<int16_t>& audioData,
+                                           const float quantScale,
+                                           const int quantOffset,
+                                           float trainingMean = 0)
+        {
+            this->ComputeMelSpec(audioData, trainingMean);
+            float minVal = std::numeric_limits<T>::min();
+            float maxVal = std::numeric_limits<T>::max();
+
+            std::vector<T> melSpecOut(this->_m_params.m_numFbankBins);
+            const size_t numFbankBins = this->_m_params.m_numFbankBins;
+
+            /* Quantize to T. */
+            for (size_t k = 0; k < numFbankBins; ++k) {
+                auto quantizedEnergy = std::round(((this->_m_melEnergies[k]) / quantScale) + quantOffset);
+                melSpecOut[k] = static_cast<T>(std::min<float>(std::max<float>(quantizedEnergy, minVal), maxVal));
+            }
+
+            return melSpecOut;
+        }
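+
+        /* Worked example of the quantisation above (scale, offset and type are
+         * illustrative): with T = int8_t, quantScale = 0.1 and quantOffset = -128, a mel
+         * energy of 1.7 maps to round(1.7 / 0.1) + (-128) = -111, which lies inside the
+         * int8 range [-128, 127] and is therefore stored unclamped. */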
+
+        /* Constants */
+        static constexpr float ms_logStep = /*logf(6.4)*/ 1.8562979903656 / 27.0;
+        static constexpr float ms_freqStep = 200.0 / 3;
+        static constexpr float ms_minLogHz = 1000.0;
+        static constexpr float ms_minLogMel = ms_minLogHz / ms_freqStep;
+
+    protected:
+        /**
+         * @brief       Project input frequency to Mel Scale.
+         * @param[in]   freq - input frequency in floating point
+         * @param[in]   useHTKMethod - bool to signal if HTK method is to be
+         *              used for calculation
+         * @return      Mel transformed frequency in floating point
+         **/
+        static float MelScale(const float    freq,
+                              const bool     useHTKMethod = true);
+
+        /**
+         * @brief       Inverse Mel transform - convert MEL warped frequency
+         *              back to normal frequency
+         * @param[in]   melFreq - Mel frequency in floating point
+         * @param[in]   useHTKMethod - bool to signal if HTK method is to be
+         *              used for calculation
+         * @return      Real world frequency in floating point
+         **/
+        static float InverseMelScale(const float melFreq,
+                                     const bool  useHTKMethod = true);
+
+        /**
+         * @brief       Populates MEL energies after applying the MEL filter
+         *              bank weights and adding them up to be placed into
+         *              bins, according to the filter bank's first and last
+         *              indices (pre-computed for each filter bank element
+         *              by _CreateMelFilterBank function).
+         * @param[in]   fftVec                  Vector populated with FFT magnitudes
+         * @param[in]   melFilterBank           2D Vector with filter bank weights
+         * @param[in]   filterBankFilterFirst   Vector containing the first indices of filter bank
+         *                                      to be used for each bin.
+         * @param[in]   filterBankFilterLast    Vector containing the last indices of filter bank
+         *                                      to be used for each bin.
+         * @param[out]  melEnergies             Pre-allocated vector of MEL energies to be
+         *                                      populated.
+         * @return      true if successful, false otherwise
+         */
+        virtual bool ApplyMelFilterBank(
+                std::vector<float>&                 fftVec,
+                std::vector<std::vector<float>>&    melFilterBank,
+                std::vector<int32_t>&               filterBankFilterFirst,
+                std::vector<int32_t>&               filterBankFilterLast,
+                std::vector<float>&                 melEnergies);
+
+        /**
+         * @brief           Converts the Mel energies to logarithmic scale
+         * @param[in/out]   melEnergies - 1D vector of Mel energies
+         **/
+        virtual void ConvertToLogarithmicScale(std::vector<float>& melEnergies);
+
+        /**
+         * @brief       Given the low and high Mel values, get the normaliser
+         *              for weights to be applied when populating the filter
+         *              bank.
+         * @param[in]   leftMel - low Mel frequency value
+         * @param[in]   rightMel - high Mel frequency value
+         * @param[in]   useHTKMethod - bool to signal if HTK method is to be
+         *              used for calculation
+         * @return      Normaliser value to be applied when
+         *              populating the filter bank.
+         */
+        virtual float GetMelFilterBankNormaliser(
+                const float&   leftMel,
+                const float&   rightMel,
+                const bool     useHTKMethod);
+
+    private:
+        MelSpecParams                   _m_params;
+        std::vector<float>              _m_frame;
+        std::vector<float>              _m_buffer;
+        std::vector<float>              _m_melEnergies;
+        std::vector<float>              _m_windowFunc;
+        std::vector<std::vector<float>> _m_melFilterBank;
+        std::vector<int32_t>            _m_filterBankFilterFirst;
+        std::vector<int32_t>            _m_filterBankFilterLast;
+        bool                            _m_filterBankInitialised;
+        arm::app::math::FftInstance     _m_fftInstance;
+
+        /**
+         * @brief       Initialises the filter banks.
+         **/
+        void _InitMelFilterBank();
+
+        /**
+         * @brief       Signals whether the instance of MelSpectrogram has had its
+         *              required buffers initialised
+         * @return      True if initialised, false otherwise
+         **/
+        bool _IsMelFilterBankInited();
+
+        /**
+         * @brief       Create mel filter banks for Mel Spectrogram calculation.
+         * @return      2D vector of floats
+         **/
+        std::vector<std::vector<float>> _CreateMelFilterBank();
+
+        /**
+         * @brief       Computes the magnitude from an interleaved complex array
+         **/
+        void _ConvertToPowerSpectrum();
+
+    };
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+
+#endif /* MELSPECTROGRAM_HPP */
diff --git a/source/use_case/ad/include/UseCaseHandler.hpp b/source/use_case/ad/include/UseCaseHandler.hpp
new file mode 100644
index 0000000..b62b36d
--- /dev/null
+++ b/source/use_case/ad/include/UseCaseHandler.hpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef AD_EVT_HANDLER_H
+#define AD_EVT_HANDLER_H
+#include "AppContext.hpp"
+
+namespace arm {
+namespace app {
+    /**
+     * @brief       Handles the inference event
+     * @param[in]   ctx         reference to the application context
+     * @param[in]   dataIndex   index to the input data to classify
+     * @param[in]   runAll      flag to request classification of all the available audio clips
+     * @return      True or false based on execution success
+     **/
+    bool ClassifyVibrationHandler(ApplicationContext& ctx, uint32_t dataIndex, bool runAll);
+} /* namespace app */
+} /* namespace arm */
+#endif /* AD_EVT_HANDLER_H */
\ No newline at end of file
diff --git a/source/use_case/ad/src/AdMelSpectrogram.cc b/source/use_case/ad/src/AdMelSpectrogram.cc
new file mode 100644
index 0000000..183c05c
--- /dev/null
+++ b/source/use_case/ad/src/AdMelSpectrogram.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AdMelSpectrogram.hpp"
+
+#include "PlatformMath.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    bool AdMelSpectrogram::ApplyMelFilterBank(
+            std::vector<float>&                 fftVec,
+            std::vector<std::vector<float>>&    melFilterBank,
+            std::vector<int32_t>&               filterBankFilterFirst,
+            std::vector<int32_t>&               filterBankFilterLast,
+            std::vector<float>&                 melEnergies)
+    {
+        const size_t numBanks = melEnergies.size();
+
+        if (numBanks != filterBankFilterFirst.size() ||
+            numBanks != filterBankFilterLast.size()) {
+            printf_err("unexpected filter bank lengths\n");
+            return false;
+        }
+
+        for (size_t bin = 0; bin < numBanks; ++bin) {
+            auto filterBankIter = melFilterBank[bin].begin();
+            float melEnergy = 1e-10; /* Avoid log of zero at later stages. */
+            const int32_t firstIndex = filterBankFilterFirst[bin];
+            const int32_t lastIndex = filterBankFilterLast[bin];
+
+            for (int32_t i = firstIndex; i <= lastIndex; ++i) {
+                melEnergy += (*filterBankIter++ * fftVec[i]);
+            }
+
+            melEnergies[bin] = melEnergy;
+        }
+
+        return true;
+    }
+
+    void AdMelSpectrogram::ConvertToLogarithmicScale(
+            std::vector<float>& melEnergies)
+    {
+        /* Container for natural logarithms of mel energies */
+        std::vector <float> vecLogEnergies(melEnergies.size(), 0.f);
+
+        /* Because we are taking natural logs, we need to multiply by log10(e).
+         * Also, for wav2letter model, we scale our log10 values by 10 */
+        constexpr float multiplier = 10.0 * /* default scalar */
+                                     0.4342944819032518; /* log10f(std::exp(1.0))*/
+
+        /* Take log of the whole vector */
+        math::MathUtils::VecLogarithmF32(melEnergies, vecLogEnergies);
+
+        /* Scale the log values. */
+        for (auto iterM = melEnergies.begin(), iterL = vecLogEnergies.begin();
+             iterM != melEnergies.end(); ++iterM, ++iterL) {
+
+            *iterM = *iterL * multiplier;
+        }
+    }
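+
+    /* Worked example for the scaling above: for a mel energy of 100, the natural log is
+     * ln(100) ~= 4.6052; multiplying by 10 * log10(e) ~= 4.3429 gives ~20.0, i.e. the same
+     * result as computing 10 * log10(100) dB directly. */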
+
+    float AdMelSpectrogram::GetMelFilterBankNormaliser(
+            const float&    leftMel,
+            const float&    rightMel,
+            const bool      useHTKMethod)
+    {
+        /* Slaney normalization for mel weights. */
+        return (2.0f / (AdMelSpectrogram::InverseMelScale(rightMel, useHTKMethod) -
+                        AdMelSpectrogram::InverseMelScale(leftMel, useHTKMethod)));
+    }
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/ad/src/AdModel.cc b/source/use_case/ad/src/AdModel.cc
new file mode 100644
index 0000000..148bc98
--- /dev/null
+++ b/source/use_case/ad/src/AdModel.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AdModel.hpp"
+
+#include "hal.h"
+
+const tflite::MicroOpResolver& arm::app::AdModel::GetOpResolver()
+{
+    return this->_m_opResolver;
+}
+
+bool arm::app::AdModel::EnlistOperations()
+{
+    this->_m_opResolver.AddAveragePool2D();
+    this->_m_opResolver.AddConv2D();
+    this->_m_opResolver.AddDepthwiseConv2D();
+    this->_m_opResolver.AddRelu6();
+    this->_m_opResolver.AddReshape();
+
+#if defined(ARM_NPU)
+    if (kTfLiteOk == this->_m_opResolver.AddEthosU()) {
+        info("Added %s support to op resolver\n",
+            tflite::GetString_ETHOSU());
+    } else {
+        printf_err("Failed to add Arm NPU support to op resolver.");
+        return false;
+    }
+#endif /* ARM_NPU */
+    return true;
+}
+
+extern uint8_t* GetModelPointer();
+const uint8_t* arm::app::AdModel::ModelPointer()
+{
+    return GetModelPointer();
+}
+extern size_t GetModelLen();
+size_t arm::app::AdModel::ModelSize()
+{
+    return GetModelLen();
+}
diff --git a/source/use_case/ad/src/AdPostProcessing.cc b/source/use_case/ad/src/AdPostProcessing.cc
new file mode 100644
index 0000000..157784b
--- /dev/null
+++ b/source/use_case/ad/src/AdPostProcessing.cc
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AdPostProcessing.hpp"
+
+#include "hal.h"
+
+#include <numeric>
+#include <cmath>
+#include <string>
+
+namespace arm {
+namespace app {
+
+    template<typename T>
+    std::vector<float> Dequantize(TfLiteTensor* tensor) {
+
+        if (tensor == nullptr) {
+            printf_err("Tensor is null pointer can not dequantize.\n");
+            return std::vector<float>();
+        }
+        T* tensorData = tflite::GetTensorData<T>(tensor);
+
+        uint32_t totalOutputSize = 1;
+        for (int inputDim = 0; inputDim < tensor->dims->size; inputDim++){
+            totalOutputSize *= tensor->dims->data[inputDim];
+        }
+
+        /* For getting the floating point values, we need quantization parameters */
+        QuantParams quantParams = GetTensorQuantParams(tensor);
+
+        std::vector<float> dequantizedOutput(totalOutputSize);
+
+        for (size_t i = 0; i < totalOutputSize; ++i) {
+            dequantizedOutput[i] = quantParams.scale * (tensorData[i] - quantParams.offset);
+        }
+
+        return dequantizedOutput;
+    }
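+
+    /* Illustrative example (quantisation parameters are examples only): for an int8 tensor
+     * with scale = 0.05 and offset = -128, a stored value of -28 dequantizes to
+     * 0.05 * (-28 - (-128)) = 5.0. */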
+
+    void Softmax(std::vector<float>& inputVector) {
+        auto start = inputVector.begin();
+        auto end = inputVector.end();
+
+        /* Fix for numerical stability and apply exp. */
+        float maxValue = *std::max_element(start, end);
+        for (auto it = start; it!=end; ++it) {
+            *it = std::exp((*it) - maxValue);
+        }
+
+        float sumExp = std::accumulate(start, end, 0.0f);
+
+        for (auto it = start; it!=end; ++it) {
+            *it = (*it)/sumExp;
+        }
+    }
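+
+    /* Worked example for the softmax above: for the input {1.0, 2.0} the maximum (2.0) is
+     * subtracted to give {-1.0, 0.0}; exponentiation yields {0.3679, 1.0}, the sum is
+     * 1.3679, and the normalised output is approximately {0.269, 0.731}. */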
+
+    int8_t OutputIndexFromFileName(std::string wavFileName) {
+        /* Filename is assumed in the form machine_id_00.wav */
+        std::string delimiter = "_";  /* First character used to split the file name up. */
+        size_t delimiterStart;
+        std::string subString;
+        size_t machineIdxInString = 3;  /* Which part of the file name the machine id should be at. */
+
+        for (size_t i = 0; i < machineIdxInString; ++i) {
+            delimiterStart = wavFileName.find(delimiter);
+            subString = wavFileName.substr(0, delimiterStart);
+            wavFileName.erase(0, delimiterStart + delimiter.length());
+        }
+
+        /* At this point substring should be 00.wav */
+        delimiter = ".";  /* Second character used to split the file name up. */
+        delimiterStart = subString.find(delimiter);
+        subString = (delimiterStart != std::string::npos) ? subString.substr(0, delimiterStart) : subString;
+
+        auto is_number = [](const std::string& str) ->  bool
+        {
+            std::string::const_iterator it = str.begin();
+            while (it != str.end() && std::isdigit(*it)) ++it;
+            return !str.empty() && it == str.end();
+        };
+
+        const int8_t machineIdx = is_number(subString) ? std::stoi(subString) : -1;
+
+        /* Return corresponding index in the output vector. */
+        if (machineIdx == 0) {
+            return 0;
+        } else if (machineIdx == 2) {
+            return 1;
+        } else if (machineIdx == 4) {
+            return 2;
+        } else if (machineIdx == 6) {
+            return 3;
+        } else {
+            printf_err("%d is an invalid machine index \n", machineIdx);
+            return -1;
+        }
+    }
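+
+    /* Illustrative example (the file name is an example only): for "anomaly_id_00_1.wav"
+     * the third '_'-separated token is "00"; it contains no '.', so it is left unchanged,
+     * the machine id parses as 0 and the function returns output index 0. A machine id of
+     * "04" would map to output index 2. */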
+
+    template std::vector<float> Dequantize<uint8_t>(TfLiteTensor* tensor);
+    template std::vector<float> Dequantize<int8_t>(TfLiteTensor* tensor);
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/use_case/ad/src/MainLoop.cc b/source/use_case/ad/src/MainLoop.cc
new file mode 100644
index 0000000..5455b43
--- /dev/null
+++ b/source/use_case/ad/src/MainLoop.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"                    /* Brings in platform definitions */
+#include "InputFiles.hpp"           /* For input data */
+#include "AdModel.hpp"              /* Model class for running inference */
+#include "UseCaseCommonUtils.hpp"   /* Utils functions */
+#include "UseCaseHandler.hpp"       /* Handlers for different user options */
+
+enum opcodes
+{
+    MENU_OPT_RUN_INF_NEXT = 1,       /* Run on next vector */
+    MENU_OPT_RUN_INF_CHOSEN,         /* Run on a user provided vector index */
+    MENU_OPT_RUN_INF_ALL,            /* Run inference on all */
+    MENU_OPT_SHOW_MODEL_INFO,        /* Show model info */
+    MENU_OPT_LIST_AUDIO_CLIPS        /* List the current baked audio signals */
+};
+
+static void DisplayMenu()
+{
+    printf("\n\nUser input required\n");
+    printf("Enter option number from:\n\n");
+    printf("  %u. Classify next audio signal\n", MENU_OPT_RUN_INF_NEXT);
+    printf("  %u. Classify audio signal at chosen index\n", MENU_OPT_RUN_INF_CHOSEN);
+    printf("  %u. Run classification on all audio signals\n", MENU_OPT_RUN_INF_ALL);
+    printf("  %u. Show NN model info\n", MENU_OPT_SHOW_MODEL_INFO);
+    printf("  %u. List audio signals\n\n", MENU_OPT_LIST_AUDIO_CLIPS);
+    printf("  Choice: ");
+}
+
+
+void main_loop(hal_platform& platform)
+{
+    arm::app::AdModel model;  /* Model wrapper object. */
+
+    /* Load the model. */
+    if (!model.Init())
+    {
+        printf_err("failed to initialise model\n");
+        return;
+    }
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("model", model);
+    caseContext.Set<uint32_t>("clipIndex", 0);
+    caseContext.Set<int>("frameLength", g_FrameLength);
+    caseContext.Set<int>("frameStride", g_FrameStride);
+    caseContext.Set<float>("scoreThreshold", g_ScoreThreshold);
+    caseContext.Set<float>("trainingMean", g_TrainingMean);
+
+    /* Main program loop. */
+    bool executionSuccessful = true;
+    constexpr bool bUseMenu = NUMBER_OF_FILES > 1 ? true : false;
+
+    /* Loop. */
+    do {
+        int menuOption = MENU_OPT_RUN_INF_NEXT;
+        if (bUseMenu) {
+            DisplayMenu();
+            menuOption = arm::app::ReadUserInputAsInt(platform);
+            printf("\n");
+        }
+        switch (menuOption) {
+            case MENU_OPT_RUN_INF_NEXT:
+                executionSuccessful = ClassifyVibrationHandler(
+                        caseContext,
+                        caseContext.Get<uint32_t>("clipIndex"),
+                        false);
+                break;
+            case MENU_OPT_RUN_INF_CHOSEN: {
+                printf("    Enter the data index [0, %d]: ",
+                       NUMBER_OF_FILES-1);
+                auto audioIndex = static_cast<uint32_t>(
+                        arm::app::ReadUserInputAsInt(platform));
+                executionSuccessful = ClassifyVibrationHandler(caseContext,
+                                                           audioIndex,
+                                                           false);
+                break;
+            }
+            case MENU_OPT_RUN_INF_ALL:
+                executionSuccessful = ClassifyVibrationHandler(
+                    caseContext,
+                    caseContext.Get<uint32_t>("clipIndex"),
+                    true);
+                break;
+            case MENU_OPT_SHOW_MODEL_INFO:
+                executionSuccessful = model.ShowModelInfoHandler();
+                break;
+            case MENU_OPT_LIST_AUDIO_CLIPS:
+                executionSuccessful = ListFilesHandler(caseContext);
+                break;
+            default:
+                printf("Incorrect choice, try again.");
+                break;
+        }
+    } while (executionSuccessful && bUseMenu);
+    info("Main loop terminated.\n");
+}
diff --git a/source/use_case/ad/src/MelSpectrogram.cc b/source/use_case/ad/src/MelSpectrogram.cc
new file mode 100644
index 0000000..86d57e6
--- /dev/null
+++ b/source/use_case/ad/src/MelSpectrogram.cc
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "MelSpectrogram.hpp"
+
+#include "PlatformMath.hpp"
+
+#include <cfloat>
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    MelSpecParams::MelSpecParams(
+            const float samplingFreq,
+            const uint32_t numFbankBins,
+            const float melLoFreq,
+            const float melHiFreq,
+            const uint32_t frameLen,
+            const bool useHtkMethod):
+            m_samplingFreq(samplingFreq),
+            m_numFbankBins(numFbankBins),
+            m_melLoFreq(melLoFreq),
+            m_melHiFreq(melHiFreq),
+            m_frameLen(frameLen),
+
+            /* Smallest power of 2 >= frame length. */
+            m_frameLenPadded(pow(2, ceil((log(frameLen)/log(2))))),
+            m_useHtkMethod(useHtkMethod)
+    {}
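+
+    /* Worked example for the padded frame length above: a frame length of 640 samples gives
+     * log2(640) ~= 9.32, which rounds up to 10, so the padded FFT length becomes 2^10 = 1024. */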
+
+    std::string MelSpecParams::Str()
+    {
+        char strC[1024];
+        snprintf(strC, sizeof(strC) - 1, "\n   \
+    \n\t Sampling frequency:         %f\
+    \n\t Number of filter banks:     %u\
+    \n\t Mel frequency limit (low):  %f\
+    \n\t Mel frequency limit (high): %f\
+    \n\t Frame length:               %u\
+    \n\t Padded frame length:        %u\
+    \n\t Using HTK for Mel scale:    %s\n",
+                 this->m_samplingFreq, this->m_numFbankBins, this->m_melLoFreq,
+                 this->m_melHiFreq, this->m_frameLen,
+                 this->m_frameLenPadded, this->m_useHtkMethod ? "yes" : "no");
+        return std::string{strC};
+    }
+
+    MelSpectrogram::MelSpectrogram(const MelSpecParams& params):
+            _m_params(params),
+            _m_filterBankInitialised(false)
+    {
+        this->_m_buffer = std::vector<float>(
+                this->_m_params.m_frameLenPadded, 0.0);
+        this->_m_frame = std::vector<float>(
+                this->_m_params.m_frameLenPadded, 0.0);
+        this->_m_melEnergies = std::vector<float>(
+                this->_m_params.m_numFbankBins, 0.0);
+
+        this->_m_windowFunc = std::vector<float>(this->_m_params.m_frameLen);
+        const float multiplier = 2 * M_PI / this->_m_params.m_frameLen;
+
+        /* Create window function. */
+        for (size_t i = 0; i < this->_m_params.m_frameLen; ++i) {
+            this->_m_windowFunc[i] = (0.5 - (0.5 *
+                                             math::MathUtils::CosineF32(static_cast<float>(i) * multiplier)));
+        }
+
+        math::MathUtils::FftInitF32(this->_m_params.m_frameLenPadded, this->_m_fftInstance);
+        debug("Instantiated Mel Spectrogram object: %s\n", this->_m_params.Str().c_str());
+    }
+
+    void MelSpectrogram::Init()
+    {
+        this->_InitMelFilterBank();
+    }
+
+    float MelSpectrogram::MelScale(const float freq, const bool useHTKMethod)
+    {
+        if (useHTKMethod) {
+            return 1127.0f * logf (1.0f + freq / 700.0f);
+        } else {
+            /* Slaney formula for mel scale. */
+            float mel = freq / ms_freqStep;
+
+            if (freq >= ms_minLogHz) {
+                mel = ms_minLogMel + logf(freq / ms_minLogHz) / ms_logStep;
+            }
+            return mel;
+        }
+    }
+
+    float MelSpectrogram::InverseMelScale(const float melFreq, const bool useHTKMethod)
+    {
+        if (useHTKMethod) {
+            return 700.0f * (expf (melFreq / 1127.0f) - 1.0f);
+        } else {
+            /* Slaney formula for inverse mel scale. */
+            float freq = ms_freqStep * melFreq;
+
+            if (melFreq >= ms_minLogMel) {
+                freq = ms_minLogHz * expf(ms_logStep * (melFreq - ms_minLogMel));
+            }
+            return freq;
+        }
+    }
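+
+    /* Worked example for the two Mel scales above (the frequency value is illustrative):
+     * for freq = 1000 Hz the Slaney formula gives 1000 / (200 / 3) = 15.0 mel (the linear
+     * and logarithmic regions meet at 1000 Hz), while the HTK formula gives
+     * 1127 * ln(1 + 1000 / 700) ~= 1000 mel. */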
+
+    bool MelSpectrogram::ApplyMelFilterBank(
+            std::vector<float>&                 fftVec,
+            std::vector<std::vector<float>>&    melFilterBank,
+            std::vector<int32_t>&               filterBankFilterFirst,
+            std::vector<int32_t>&               filterBankFilterLast,
+            std::vector<float>&                 melEnergies)
+    {
+        const size_t numBanks = melEnergies.size();
+
+        if (numBanks != filterBankFilterFirst.size() ||
+            numBanks != filterBankFilterLast.size()) {
+            printf_err("unexpected filter bank lengths\n");
+            return false;
+        }
+
+        for (size_t bin = 0; bin < numBanks; ++bin) {
+            auto filterBankIter = melFilterBank[bin].begin();
+            float melEnergy = FLT_MIN; /* Avoid log of zero at later stages */
+            int32_t firstIndex = filterBankFilterFirst[bin];
+            int32_t lastIndex = filterBankFilterLast[bin];
+
+            for (int i = firstIndex; i <= lastIndex; ++i) {
+                float energyRep = math::MathUtils::SqrtF32(fftVec[i]);
+                melEnergy += (*filterBankIter++ * energyRep);
+            }
+
+            melEnergies[bin] = melEnergy;
+        }
+
+        return true;
+    }
+
+    void MelSpectrogram::ConvertToLogarithmicScale(std::vector<float>& melEnergies)
+    {
+        for (size_t bin = 0; bin < melEnergies.size(); ++bin) {
+            melEnergies[bin] = logf(melEnergies[bin]);
+        }
+    }
+
+    void MelSpectrogram::_ConvertToPowerSpectrum()
+    {
+        const uint32_t halfDim = this->_m_params.m_frameLenPadded / 2;
+
+        /* Handle this special case. */
+        float firstEnergy = this->_m_buffer[0] * this->_m_buffer[0];
+        float lastEnergy = this->_m_buffer[1] * this->_m_buffer[1];
+
+        math::MathUtils::ComplexMagnitudeSquaredF32(
+                this->_m_buffer.data(),
+                this->_m_buffer.size(),
+                this->_m_buffer.data(),
+                this->_m_buffer.size()/2);
+
+        this->_m_buffer[0] = firstEnergy;
+        this->_m_buffer[halfDim] = lastEnergy;
+    }
+
+    float MelSpectrogram::GetMelFilterBankNormaliser(
+            const float&    leftMel,
+            const float&    rightMel,
+            const bool      useHTKMethod)
+    {
+        UNUSED(leftMel);
+        UNUSED(rightMel);
+        UNUSED(useHTKMethod);
+
+        /* By default, no normalisation => return 1 */
+        return 1.f;
+    }
+
+    void MelSpectrogram::_InitMelFilterBank()
+    {
+        if (!this->_IsMelFilterBankInited()) {
+            this->_m_melFilterBank = this->_CreateMelFilterBank();
+            this->_m_filterBankInitialised = true;
+        }
+    }
+
+    bool MelSpectrogram::_IsMelFilterBankInited()
+    {
+        return this->_m_filterBankInitialised;
+    }
+
+    std::vector<float> MelSpectrogram::ComputeMelSpec(const std::vector<int16_t>& audioData, float trainingMean)
+    {
+        this->_InitMelFilterBank();
+
+        /* TensorFlow way of normalizing .wav data to (-1, 1). */
+        constexpr float normaliser = 1.0/(1<<15);
+        for (size_t i = 0; i < this->_m_params.m_frameLen; ++i) {
+            this->_m_frame[i] = static_cast<float>(audioData[i]) * normaliser;
+        }
+
+        /* Apply window function to input frame. */
+        for(size_t i = 0; i < this->_m_params.m_frameLen; ++i) {
+            this->_m_frame[i] *= this->_m_windowFunc[i];
+        }
+
+        /* Set remaining frame values to 0. */
+        std::fill(this->_m_frame.begin() + this->_m_params.m_frameLen,this->_m_frame.end(), 0);
+
+        /* Compute FFT. */
+        math::MathUtils::FftF32(this->_m_frame, this->_m_buffer, this->_m_fftInstance);
+
+        /* Convert to power spectrum. */
+        this->_ConvertToPowerSpectrum();
+
+        /* Apply mel filterbanks. */
+        if (!this->ApplyMelFilterBank(this->_m_buffer,
+                                      this->_m_melFilterBank,
+                                      this->_m_filterBankFilterFirst,
+                                      this->_m_filterBankFilterLast,
+                                      this->_m_melEnergies)) {
+            printf_err("Failed to apply MEL filter banks\n");
+        }
+
+        /* Convert to logarithmic scale */
+        this->ConvertToLogarithmicScale(this->_m_melEnergies);
+
+        /* Perform mean subtraction. */
+        for (auto& energy:this->_m_melEnergies) {
+            energy -= trainingMean;
+        }
+
+        return this->_m_melEnergies;
+    }
+
+    std::vector<std::vector<float>> MelSpectrogram::_CreateMelFilterBank()
+    {
+        size_t numFftBins = this->_m_params.m_frameLenPadded / 2;
+        float fftBinWidth = static_cast<float>(this->_m_params.m_samplingFreq) / this->_m_params.m_frameLenPadded;
+
+        float melLowFreq = MelSpectrogram::MelScale(this->_m_params.m_melLoFreq,
+                                          this->_m_params.m_useHtkMethod);
+        float melHighFreq = MelSpectrogram::MelScale(this->_m_params.m_melHiFreq,
+                                           this->_m_params.m_useHtkMethod);
+        float melFreqDelta = (melHighFreq - melLowFreq) / (this->_m_params.m_numFbankBins + 1);
+
+        std::vector<float> thisBin = std::vector<float>(numFftBins);
+        std::vector<std::vector<float>> melFilterBank(
+                this->_m_params.m_numFbankBins);
+        this->_m_filterBankFilterFirst =
+                std::vector<int32_t>(this->_m_params.m_numFbankBins);
+        this->_m_filterBankFilterLast =
+                std::vector<int32_t>(this->_m_params.m_numFbankBins);
+
+        for (size_t bin = 0; bin < this->_m_params.m_numFbankBins; bin++) {
+            float leftMel = melLowFreq + bin * melFreqDelta;
+            float centerMel = melLowFreq + (bin + 1) * melFreqDelta;
+            float rightMel = melLowFreq + (bin + 2) * melFreqDelta;
+
+            int32_t firstIndex = -1;
+            int32_t lastIndex = -1;
+            const float normaliser = this->GetMelFilterBankNormaliser(leftMel, rightMel, this->_m_params.m_useHtkMethod);
+
+            for (size_t i = 0; i < numFftBins; ++i) {
+                float freq = (fftBinWidth * i); /* Center freq of this fft bin. */
+                float mel = MelSpectrogram::MelScale(freq, this->_m_params.m_useHtkMethod);
+                thisBin[i] = 0.0;
+
+                if (mel > leftMel && mel < rightMel) {
+                    float weight;
+                    if (mel <= centerMel) {
+                        weight = (mel - leftMel) / (centerMel - leftMel);
+                    } else {
+                        weight = (rightMel - mel) / (rightMel - centerMel);
+                    }
+
+                    thisBin[i] = weight * normaliser;
+                    if (firstIndex == -1) {
+                        firstIndex = i;
+                    }
+                    lastIndex = i;
+                }
+            }
+
+            this->_m_filterBankFilterFirst[bin] = firstIndex;
+            this->_m_filterBankFilterLast[bin] = lastIndex;
+
+            /* Copy the part we care about. */
+            for (int32_t i = firstIndex; i <= lastIndex; ++i) {
+                melFilterBank[bin].push_back(thisBin[i]);
+            }
+        }
+
+        return melFilterBank;
+    }
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/ad/src/UseCaseHandler.cc b/source/use_case/ad/src/UseCaseHandler.cc
new file mode 100644
index 0000000..c18a0a4
--- /dev/null
+++ b/source/use_case/ad/src/UseCaseHandler.cc
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "UseCaseHandler.hpp"
+
+#include "AdModel.hpp"
+#include "InputFiles.hpp"
+#include "Classifier.hpp"
+#include "hal.h"
+#include "AdMelSpectrogram.hpp"
+#include "AudioUtils.hpp"
+#include "UseCaseCommonUtils.hpp"
+#include "AdPostProcessing.hpp"
+
+namespace arm {
+namespace app {
+
+    /**
+    * @brief           Helper function to increment current audio clip index
+    * @param[in/out]   ctx     reference to the application context object
+    **/
+    static void _IncrementAppCtxClipIdx(ApplicationContext& ctx);
+
+    /**
+     * @brief           Helper function to set the audio clip index
+     * @param[in/out]   ctx     reference to the application context object
+     * @param[in]       idx     value to be set
+     * @return          true if index is set, false otherwise
+     **/
+    static bool _SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx);
+
+    /**
+     * @brief           Presents inference results using the data presentation
+     *                  object.
+     * @param[in]       platform    reference to the hal platform object
+     * @param[in]       result      average anomaly score over the processed audio clip
+     * @param[in]       threshold   if the result is larger than this value an anomaly is reported
+     * @return          true if successful, false otherwise
+     **/
+    static bool _PresentInferenceResult(hal_platform& platform, float result, float threshold);
+
+    /**
+     * @brief Returns a function that performs feature calculation and populates the input
+     * tensor with Mel Spectrogram data.
+     *
+     * The input tensor's data type is checked to choose the correct feature data type.
+     * If the tensor has an integer data type then the original features are quantised.
+     *
+     * Warning: the Mel Spectrogram calculator provided as input must live at least as long
+     * as the returned function.
+     *
+     * @param[in]           melSpec         Mel Spectrogram feature calculator.
+     * @param[in/out]       inputTensor     Input tensor pointer to store calculated features.
+     * @param[in]           cacheSize       Size of the feature vectors cache (number of feature vectors).
+     * @param[in]           trainingMean    Training mean value to subtract from the computed features.
+     * @return              Function to be called providing audio sample and sliding window index.
+     */
+    static std::function<void (std::vector<int16_t>&, int, bool, size_t, size_t)>
+    GetFeatureCalculator(audio::AdMelSpectrogram&  melSpec,
+                         TfLiteTensor*             inputTensor,
+                         size_t                    cacheSize,
+                         float                     trainingMean);
+
+    /* Vibration classification handler */
+    bool ClassifyVibrationHandler(ApplicationContext& ctx, uint32_t clipIndex, bool runAll)
+    {
+        auto& platform = ctx.Get<hal_platform&>("platform");
+
+        constexpr uint32_t dataPsnTxtInfStartX = 20;
+        constexpr uint32_t dataPsnTxtInfStartY = 40;
+
+        platform.data_psn->clear(COLOR_BLACK);
+
+        auto& model = ctx.Get<Model&>("model");
+
+        /* If the request has a valid size, set the audio index */
+        if (clipIndex < NUMBER_OF_FILES) {
+            if (!_SetAppCtxClipIdx(ctx, clipIndex)) {
+                return false;
+            }
+        }
+        if (!model.IsInited()) {
+            printf_err("Model is not initialised! Terminating processing.\n");
+            return false;
+        }
+
+        const auto frameLength = ctx.Get<int>("frameLength");
+        const auto frameStride = ctx.Get<int>("frameStride");
+        const auto scoreThreshold = ctx.Get<float>("scoreThreshold");
+        const float trainingMean = ctx.Get<float>("trainingMean");
+        auto startClipIdx = ctx.Get<uint32_t>("clipIndex");
+
+        TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+        TfLiteTensor* inputTensor = model.GetInputTensor(0);
+
+        if (!inputTensor->dims) {
+            printf_err("Invalid input tensor dims\n");
+            return false;
+        }
+
+        TfLiteIntArray* inputShape = model.GetInputShape(0);
+        const uint32_t kNumRows = inputShape->data[1];
+        const uint32_t kNumCols = inputShape->data[2];
+
+        audio::AdMelSpectrogram melSpec = audio::AdMelSpectrogram(frameLength);
+        melSpec.Init();
+
+        /* Deduce the data length required for 1 inference from the network parameters. */
+        const uint8_t inputResizeScale = 2;
+        const uint32_t audioDataWindowSize = (((inputResizeScale * kNumCols) - 1) * frameStride) + frameLength;
+
+        /* We are choosing to move by 20 frames across the audio for each inference. */
+        const uint8_t nMelSpecVectorsInAudioStride = 20;
+
+        auto audioDataStride = nMelSpecVectorsInAudioStride * frameStride;
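+
+        /* Illustrative sizing (the frame values are examples; the real ones come from the
+         * generated model parameters): with kNumCols = 32, frameLength = 1024 and
+         * frameStride = 512, one inference needs ((2 * 32) - 1) * 512 + 1024 = 33280
+         * samples, and each new inference advances by 20 * 512 = 10240 samples. */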
+
+        do {
+            auto currentIndex = ctx.Get<uint32_t>("clipIndex");
+
+            /* Get the output index to look at based on id in the filename. */
+            int8_t machineOutputIndex = OutputIndexFromFileName(get_filename(currentIndex));
+            if (machineOutputIndex == -1) {
+                return false;
+            }
+
+            /* Creating a Mel Spectrogram sliding window for the data required for 1 inference.
+             * "resizing" done here by multiplying stride by resize scale. */
+            auto audioMelSpecWindowSlider = audio::SlidingWindow<const int16_t>(
+                    get_audio_array(currentIndex),
+                    audioDataWindowSize, frameLength,
+                    frameStride * inputResizeScale);
+
+            /* Creating a sliding window through the whole audio clip. */
+            auto audioDataSlider = audio::SlidingWindow<const int16_t>(
+                    get_audio_array(currentIndex),
+                    get_audio_array_size(currentIndex),
+                    audioDataWindowSize, audioDataStride);
+
+            /* Calculate the number of feature vectors in the window overlap region, taking
+             * resizing into account. These feature vectors will be reused. */
+            auto numberOfReusedFeatureVectors = kNumRows - (nMelSpecVectorsInAudioStride / inputResizeScale);
+
+            /* Construct feature calculation function. */
+            auto melSpecFeatureCalc = GetFeatureCalculator(melSpec, inputTensor,
+                                                           numberOfReusedFeatureVectors, trainingMean);
+            if (!melSpecFeatureCalc){
+                return false;
+            }
+
+            /* Result is an averaged sum over inferences. */
+            float result = 0;
+
+            /* Display message on the LCD - inference running. */
+            std::string str_inf{"Running inference... "};
+            platform.data_psn->present_data_text(
+                    str_inf.c_str(), str_inf.size(),
+                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+            info("Running inference on audio clip %u => %s\n", currentIndex, get_filename(currentIndex));
+
+            /* Start sliding through audio clip. */
+            while (audioDataSlider.HasNext()) {
+                const int16_t *inferenceWindow = audioDataSlider.Next();
+
+                /* We moved to the next window - set the features sliding to the new address. */
+                audioMelSpecWindowSlider.Reset(inferenceWindow);
+
+                /* The first window does not have cache ready. */
+                bool useCache = audioDataSlider.Index() > 0 && numberOfReusedFeatureVectors > 0;
+
+                /* Start calculating features inside one audio sliding window. */
+                while (audioMelSpecWindowSlider.HasNext()) {
+                    const int16_t *melSpecWindow = audioMelSpecWindowSlider.Next();
+                    std::vector<int16_t> melSpecAudioData = std::vector<int16_t>(melSpecWindow,
+                                                                                 melSpecWindow + frameLength);
+
+                    /* Compute features for this window and write them to input tensor. */
+                    melSpecFeatureCalc(melSpecAudioData, audioMelSpecWindowSlider.Index(),
+                                       useCache, nMelSpecVectorsInAudioStride, inputResizeScale);
+                }
+
+                info("Inference %zu/%zu\n", audioDataSlider.Index() + 1,
+                     audioDataSlider.TotalStrides() + 1);
+
+                /* Run inference over this audio clip sliding window */
+                arm::app::RunInference(platform, model);
+
+                /* Use the negative softmax score of the corresponding index as the outlier score */
+                std::vector<float> dequantOutput = Dequantize<int8_t>(outputTensor);
+                Softmax(dequantOutput);
+                result += -dequantOutput[machineOutputIndex];
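+
+                /* Illustrative scoring (the probabilities are examples): a window where the
+                 * machine's softmax probability is 0.9 contributes -0.9, whereas a
+                 * probability of 0.1 contributes -0.1; after averaging, a score closer to
+                 * zero is therefore more anomalous when compared against the threshold. */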
+
+#if VERIFY_TEST_OUTPUT
+                arm::app::DumpTensor(outputTensor);
+#endif /* VERIFY_TEST_OUTPUT */
+            } /* while (audioDataSlider.HasNext()) */
+
+            /* Use average over whole clip as final score. */
+            result /= (audioDataSlider.TotalStrides() + 1);
+
+            /* Erase. */
+            str_inf = std::string(str_inf.size(), ' ');
+            platform.data_psn->present_data_text(
+                    str_inf.c_str(), str_inf.size(),
+                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+            ctx.Set<float>("result", result);
+            if (!_PresentInferenceResult(platform, result, scoreThreshold)) {
+                return false;
+            }
+
+            _IncrementAppCtxClipIdx(ctx);
+
+        } while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);
+
+        return true;
+    }
+
+    static void _IncrementAppCtxClipIdx(ApplicationContext& ctx)
+    {
+        auto curAudioIdx = ctx.Get<uint32_t>("clipIndex");
+
+        if (curAudioIdx + 1 >= NUMBER_OF_FILES) {
+            ctx.Set<uint32_t>("clipIndex", 0);
+            return;
+        }
+        ++curAudioIdx;
+        ctx.Set<uint32_t>("clipIndex", curAudioIdx);
+    }
+
+    static bool _SetAppCtxClipIdx(ApplicationContext& ctx, const uint32_t idx)
+    {
+        if (idx >= NUMBER_OF_FILES) {
+            printf_err("Invalid idx %u (expected less than %u)\n",
+                       idx, NUMBER_OF_FILES);
+            return false;
+        }
+        ctx.Set<uint32_t>("clipIndex", idx);
+        return true;
+    }
+
+    static bool _PresentInferenceResult(hal_platform& platform, float result, float threshold)
+    {
+        constexpr uint32_t dataPsnTxtStartX1 = 20;
+        constexpr uint32_t dataPsnTxtStartY1 = 30;
+        constexpr uint32_t dataPsnTxtYIncr   = 16; /* Row index increment */
+
+        platform.data_psn->set_text_color(COLOR_GREEN);
+
+        /* Display each result */
+        uint32_t rowIdx1 = dataPsnTxtStartY1 + 2 * dataPsnTxtYIncr;
+
+        std::string resultStr = std::string{"Average anomaly score is: "} + std::to_string(result) +
+                std::string("\n") + std::string("Anomaly threshold is: ") + std::to_string(threshold) +
+                std::string("\n");
+
+        if (result > threshold) {
+            resultStr += std::string("Anomaly detected!");
+        } else {
+            resultStr += std::string("Everything fine, no anomaly detected!");
+        }
+
+        platform.data_psn->present_data_text(
+                resultStr.c_str(), resultStr.size(),
+                dataPsnTxtStartX1, rowIdx1, 0);
+
+        info("%s\n", resultStr.c_str());
+
+        return true;
+    }
+
+    /**
+     * @brief Generic feature calculator factory.
+     *
+     * Returns a lambda function that computes features using a feature cache.
+     * The actual feature computation is done by the lambda provided as a parameter.
+     * Features are written to the input tensor memory.
+     *
+     * @tparam T            feature vector type.
+     * @param inputTensor   model input tensor pointer.
+     * @param cacheSize     number of feature vectors to cache. Defined by the sliding window overlap.
+     * @param compute       features calculator function.
+     * @return              lambda function to compute features.
+     */
+    template<class T>
+    std::function<void (std::vector<int16_t>&, size_t, bool, size_t, size_t)>
+    _FeatureCalc(TfLiteTensor* inputTensor, size_t cacheSize,
+                 std::function<std::vector<T> (std::vector<int16_t>& )> compute)
+    {
+        /* Feature cache to be captured by the lambda function. */
+        static std::vector<std::vector<T>> featureCache = std::vector<std::vector<T>>(cacheSize);
+
+        return [=](std::vector<int16_t>& audioDataWindow,
+                   size_t index,
+                   bool useCache,
+                   size_t featuresOverlapIndex,
+                   size_t resizeScale)
+        {
+            T *tensorData = tflite::GetTensorData<T>(inputTensor);
+            std::vector<T> features;
+
+            /* Reuse features from the cache if the cache is ready and the sliding windows overlap.
+             * The overlap is at the beginning of the sliding window and its size equals the feature cache size. */
+            if (useCache && index < featureCache.size()) {
+                features = std::move(featureCache[index]);
+            } else {
+                features = std::move(compute(audioDataWindow));
+            }
+            auto size = features.size() / resizeScale;
+            auto sizeBytes = sizeof(T);
+
+            /* Input should be transposed and "resized" by skipping elements. */
+            for (size_t outIndex = 0; outIndex < size; outIndex++) {
+                std::memcpy(tensorData + (outIndex*size) + index, &features[outIndex*resizeScale], sizeBytes);
+            }
+
+            /* Start renewing the cache as soon as the iteration moves out of the windows' overlap. */
+            if (index >= featuresOverlapIndex / resizeScale) {
+                featureCache[index - featuresOverlapIndex / resizeScale] = std::move(features);
+            }
+        };
+    }
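+    /* A minimal usage sketch for the factory above (illustrative only: the quantisation
+     * values and the calling pattern are assumptions; the real wiring is done in
+     * GetFeatureCalculator() and the handler above):
+     *
+     *   auto calc = _FeatureCalc<int8_t>(inputTensor, cacheSize,
+     *       [&](std::vector<int16_t>& window) {
+     *           return melSpec.MelSpecComputeQuant<int8_t>(window, quantScale, quantOffset, trainingMean);
+     *       });
+     *   calc(audioWindow, windowIndex, useCache, nMelSpecVectorsInAudioStride, inputResizeScale);
+     */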
+
+    template std::function<void (std::vector<int16_t>&, size_t , bool, size_t, size_t)>
+    _FeatureCalc<int8_t>(TfLiteTensor* inputTensor,
+                         size_t cacheSize,
+                         std::function<std::vector<int8_t> (std::vector<int16_t>&)> compute);
+
+    template std::function<void (std::vector<int16_t>&, size_t , bool, size_t, size_t)>
+    _FeatureCalc<uint8_t>(TfLiteTensor* inputTensor,
+                          size_t cacheSize,
+                          std::function<std::vector<uint8_t> (std::vector<int16_t>&)> compute);
+
+    template std::function<void (std::vector<int16_t>&, size_t , bool, size_t, size_t)>
+    _FeatureCalc<int16_t>(TfLiteTensor* inputTensor,
+                          size_t cacheSize,
+                          std::function<std::vector<int16_t> (std::vector<int16_t>&)> compute);
+
+    template std::function<void(std::vector<int16_t>&, size_t, bool, size_t, size_t)>
+    _FeatureCalc<float>(TfLiteTensor *inputTensor,
+                        size_t cacheSize,
+                        std::function<std::vector<float>(std::vector<int16_t>&)> compute);
+
+
+    static std::function<void (std::vector<int16_t>&, int, bool, size_t, size_t)>
+    GetFeatureCalculator(audio::AdMelSpectrogram& melSpec, TfLiteTensor* inputTensor, size_t cacheSize, float trainingMean)
+    {
+        std::function<void (std::vector<int16_t>&, size_t, bool, size_t, size_t)> melSpecFeatureCalc;
+
+        TfLiteQuantization quant = inputTensor->quantization;
+
+        if (kTfLiteAffineQuantization == quant.type) {
+
+            auto *quantParams = (TfLiteAffineQuantization *) quant.params;
+            const float quantScale = quantParams->scale->data[0];
+            const int quantOffset = quantParams->zero_point->data[0];
+
+            switch (inputTensor->type) {
+                case kTfLiteInt8: {
+                    melSpecFeatureCalc = _FeatureCalc<int8_t>(inputTensor,
+                                                              cacheSize,
+                                                           [=, &melSpec](std::vector<int16_t>& audioDataWindow) {
+                                                               return melSpec.MelSpecComputeQuant<int8_t>(audioDataWindow,
+                                                                       quantScale,
+                                                                       quantOffset,
+                                                                       trainingMean);
+                                                           }
+                    );
+                    break;
+                }
+                case kTfLiteUInt8: {
+                    melSpecFeatureCalc = _FeatureCalc<uint8_t>(inputTensor,
+                                                               cacheSize,
+                                                            [=, &melSpec](std::vector<int16_t>& audioDataWindow) {
+                                                                return melSpec.MelSpecComputeQuant<uint8_t>(audioDataWindow,
+                                                                        quantScale,
+                                                                        quantOffset,
+                                                                        trainingMean);
+                                                            }
+                    );
+                    break;
+                }
+                case kTfLiteInt16: {
+                    melSpecFeatureCalc = _FeatureCalc<int16_t>(inputTensor,
+                                                               cacheSize,
+                                                            [=, &melSpec](std::vector<int16_t>& audioDataWindow) {
+                                                                return melSpec.MelSpecComputeQuant<int16_t>(audioDataWindow,
+                                                                        quantScale,
+                                                                        quantOffset,
+                                                                        trainingMean);
+                                                            }
+                    );
+                    break;
+                }
+                default:
+                    printf_err("Tensor type %s not supported\n", TfLiteTypeGetName(inputTensor->type));
+                    break;
+            }
+
+
+        } else {
+            melSpecFeatureCalc = _FeatureCalc<float>(inputTensor,
+                                                     cacheSize,
+                                                     [=, &melSpec](std::vector<int16_t>& audioDataWindow) {
+                                                         return melSpec.ComputeMelSpec(audioDataWindow,
+                                                                                       trainingMean);
+                                                     });
+        }
+        return melSpecFeatureCalc;
+    }
+
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/ad/usecase.cmake b/source/use_case/ad/usecase.cmake
new file mode 100644
index 0000000..46e4101
--- /dev/null
+++ b/source/use_case/ad/usecase.cmake
@@ -0,0 +1,111 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# If the path to a directory or source file has been defined,
+# get the type here (FILEPATH or PATH):
+if (DEFINED ${use_case}_FILE_PATH)
+    get_path_type(${${use_case}_FILE_PATH} PATH_TYPE)
+
+    # Set the default type if path is not a dir or file path (or undefined)
+    if (NOT ${PATH_TYPE} STREQUAL PATH AND NOT ${PATH_TYPE} STREQUAL FILEPATH)
+        message(FATAL_ERROR "Invalid ${use_case}_FILE_PATH. It should be a dir or file path.")
+    endif()
+else()
+    # Default is a directory path
+    set(PATH_TYPE PATH)
+endif()
+
+message(STATUS "${use_case}_FILE_PATH is of type: ${PATH_TYPE}")
+
+USER_OPTION(${use_case}_FILE_PATH "Directory with custom WAV input files, or path to a single input WAV file, to use in the evaluation application."
+    ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
+    ${PATH_TYPE})
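+# Note: USER_OPTION values such as the one above are expected to be regular CMake cache
+# entries, so they can be overridden at configure time, e.g. (illustrative):
+#   cmake -D${use_case}_FILE_PATH=/path/to/custom/wav/dir ..
+# where ${use_case} resolves to this use case's name (here, "ad").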
+
+USER_OPTION(${use_case}_AUDIO_RATE "Specify the target sampling rate. Default is 16000."
+    16000
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_MONO "Specify if the audio needs to be converted to mono. Default is ON."
+    ON
+    BOOL)
+
+USER_OPTION(${use_case}_AUDIO_OFFSET "Specify the offset (in seconds) from which to start reading the audio. Default is 0."
+    0
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_DURATION "Specify the audio duration to load (in seconds). If set to 0 the entire audio will be processed."
+    0
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_RES_TYPE "Specify the re-sampling algorithm to use. Default is 'kaiser_best'."
+    kaiser_best
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_MIN_SAMPLES "Specify the minimum number of samples to use. Default is the number of samples needed to do one inference;
+                                           shorter audio clips will be padded automatically."
+    33280
+    STRING)
+
+USER_OPTION(${use_case}_MODEL_SCORE_THRESHOLD "Specify the score threshold for a result to be deemed anomalous."
+    -0.8
+    STRING)
+
+generate_audio_code(${${use_case}_FILE_PATH} ${SRC_GEN_DIR} ${INC_GEN_DIR}
+        ${${use_case}_AUDIO_RATE}
+        ${${use_case}_AUDIO_MONO}
+        ${${use_case}_AUDIO_OFFSET}
+        ${${use_case}_AUDIO_DURATION}
+        ${${use_case}_AUDIO_RES_TYPE}
+        ${${use_case}_AUDIO_MIN_SAMPLES})
+
+USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen model"
+        0x00200000
+        STRING)
+
+# If there is no tflite file pointed to
+if (NOT DEFINED ${use_case}_MODEL_TFLITE_PATH)
+
+    set(MODEL_RESOURCES_DIR     ${DOWNLOAD_DEP_DIR}/${use_case})
+    file(MAKE_DIRECTORY         ${MODEL_RESOURCES_DIR})
+    set(MODEL_FILENAME          ad_med_nov11_int8.tflite)
+    set(DEFAULT_MODEL_PATH      ${MODEL_RESOURCES_DIR}/${MODEL_FILENAME})
+
+    # TODO: Download the model here for this use case when available on Model Zoo.
+    # For now, we write a placeholder file.
+    file(WRITE ${DEFAULT_MODEL_PATH} "Placeholder")
+else()
+    set(DEFAULT_MODEL_PATH  "N/A")
+endif()
+
+set(EXTRA_MODEL_CODE
+    "/* Model parameters for ${use_case} */"
+    "extern const int       g_FrameLength = 1024"
+    "extern const int       g_FrameStride = 512"
+    "extern const float     g_ScoreThreshold = ${${use_case}_MODEL_SCORE_THRESHOLD}"
+    "extern const float     g_TrainingMean = -30"
+    )
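+# Note (assumption): each expression above must stay in sync with the corresponding
+# extern declarations consumed by this use case's sources; renaming or removing an
+# entry here needs a matching change on the C++ side.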
+
+USER_OPTION(${use_case}_MODEL_TFLITE_PATH "NN model file to be used in the evaluation application. Model files must be in tflite format."
+        ${DEFAULT_MODEL_PATH}
+        FILEPATH)
+
+# Generate model file
+generate_tflite_code(
+        MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+        DESTINATION ${SRC_GEN_DIR}
+        EXPRESSIONS ${EXTRA_MODEL_CODE}
+)
diff --git a/source/use_case/asr/include/AsrClassifier.hpp b/source/use_case/asr/include/AsrClassifier.hpp
new file mode 100644
index 0000000..1a63814
--- /dev/null
+++ b/source/use_case/asr/include/AsrClassifier.hpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ASR_CLASSIFIER_HPP
+#define ASR_CLASSIFIER_HPP
+
+#include "Classifier.hpp"
+
+namespace arm {
+namespace app {
+
+    class AsrClassifier : public Classifier {
+    public:
+        /**
+         * @brief       Gets the top N classification results from the
+         *              output vector.
+         * @param[in]   outputTensor   Inference output tensor from an NN model.
+         * @param[out]  vecResults     A vector of classification results
+         *                             populated by this function.
+         * @param[in]   labels         Labels vector to match classified classes
+         * @param[in]   topNCount      Number of top classifications to pick.
+         * @return      true if successful, false otherwise.
+         **/
+        bool GetClassificationResults(
+            TfLiteTensor* outputTensor,
+            std::vector<ClassificationResult>& vecResults,
+            const std::vector <std::string>& labels, uint32_t topNCount) override;
+
+    private:
+        /**
+         * @brief       Utility function that gets the top 1 classification results from the
+         *              output tensor (vector of vector).
+         * @param[in]   tensor       Inference output tensor from an NN model.
+         * @param[out]  vecResults   Vector of classification results populated by this function.
+         * @param[in]   labels       Labels vector to match classified classes.
+         * @param[in]   scale        Quantization scale.
+         * @param[in]   zeroPoint    Quantization zero point.
+         * @return      true if successful, false otherwise.
+         **/
+        template<typename T>
+        bool _GetTopResults(TfLiteTensor* tensor,
+                            std::vector<ClassificationResult>& vecResults,
+                            const std::vector <std::string>& labels, double scale, double zeroPoint);
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* ASR_CLASSIFIER_HPP */
\ No newline at end of file
diff --git a/source/use_case/asr/include/AsrResult.hpp b/source/use_case/asr/include/AsrResult.hpp
new file mode 100644
index 0000000..b12ed7d
--- /dev/null
+++ b/source/use_case/asr/include/AsrResult.hpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ASR_RESULT_HPP
+#define ASR_RESULT_HPP
+
+#include "ClassificationResult.hpp"
+
+#include <vector>
+
+namespace arm {
+namespace app {
+namespace asr {
+
+    using ResultVec = std::vector < arm::app::ClassificationResult >;
+
+    /* Structure for holding ASR result. */
+    class AsrResult {
+
+    public:
+        ResultVec       m_resultVec;        /* Container for "thresholded" classification results. */
+        float           m_timeStamp;        /* Audio timestamp for this result. */
+        uint32_t        m_inferenceNumber;  /* Corresponding inference number. */
+        float           m_threshold;        /* Threshold value for `m_resultVec`. */
+
+        AsrResult() = delete;
+        AsrResult(ResultVec&        resultVec,
+                  const float       timestamp,
+                  const uint32_t    inferenceIdx,
+                  const float       scoreThreshold) {
+
+            this->m_threshold = scoreThreshold;
+            this->m_timeStamp = timestamp;
+            this->m_inferenceNumber = inferenceIdx;
+
+            this->m_resultVec = ResultVec();
+            for (auto& i : resultVec) {
+                if (i.m_normalisedVal >= this->m_threshold) {
+                    this->m_resultVec.emplace_back(i);
+                }
+            }
+        }
+        ~AsrResult() = default;
+    };
+
+} /* namespace asr */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* ASR_RESULT_HPP */
\ No newline at end of file
diff --git a/source/use_case/asr/include/OutputDecode.hpp b/source/use_case/asr/include/OutputDecode.hpp
new file mode 100644
index 0000000..6095531
--- /dev/null
+++ b/source/use_case/asr/include/OutputDecode.hpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ASR_OUTPUT_DECODE_HPP
+#define ASR_OUTPUT_DECODE_HPP
+
+#include "AsrClassifier.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+namespace asr {
+
+    /**
+     * @brief       Decodes the classified output into a human-readable
+     *              text string.
+     * @param[in]   vecResults   Vector of classification results (one per output step).
+     * @return      Decoded text string.
+    **/
+    std::string DecodeOutput(const std::vector<ClassificationResult>& vecResults);
+
+} /* namespace asr */
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* ASR_OUTPUT_DECODE_HPP */
\ No newline at end of file
diff --git a/source/use_case/asr/include/UseCaseHandler.hpp b/source/use_case/asr/include/UseCaseHandler.hpp
new file mode 100644
index 0000000..75052c7
--- /dev/null
+++ b/source/use_case/asr/include/UseCaseHandler.hpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ASR_EVT_HANDLER_HPP
+#define ASR_EVT_HANDLER_HPP
+
+#include "AppContext.hpp"
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief       Handles the inference event.
+     * @param[in]   ctx         Reference to the application context.
+     * @param[in]   clipIndex   Index to the audio clip to classify.
+     * @param[in]   runAll      Flag to request classification of all the available audio clips.
+     * @return      true or false based on execution success.
+     **/
+    bool ClassifyAudioHandler(ApplicationContext& ctx, uint32_t clipIndex, bool runAll);
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* ASR_EVT_HANDLER_HPP */
diff --git a/source/use_case/asr/include/Wav2LetterMfcc.hpp b/source/use_case/asr/include/Wav2LetterMfcc.hpp
new file mode 100644
index 0000000..3cb43b9
--- /dev/null
+++ b/source/use_case/asr/include/Wav2LetterMfcc.hpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ASR_WAV2LETTER_MFCC_HPP
+#define ASR_WAV2LETTER_MFCC_HPP
+
+#include "Mfcc.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    /* Class to provide Wav2Letter specific MFCC calculation requirements. */
+    class Wav2LetterMFCC : public MFCC {
+
+    public:
+        static constexpr uint32_t  ms_defaultSamplingFreq = 16000;
+        static constexpr uint32_t  ms_defaultNumFbankBins =   128;
+        static constexpr uint32_t  ms_defaultMelLoFreq    =     0;
+        static constexpr uint32_t  ms_defaultMelHiFreq    =  8000;
+        static constexpr bool      ms_defaultUseHtkMethod = false;
+
+        explicit Wav2LetterMFCC(const size_t numFeats, const size_t frameLen)
+            :  MFCC(MfccParams(
+                        ms_defaultSamplingFreq, ms_defaultNumFbankBins,
+                        ms_defaultMelLoFreq, ms_defaultMelHiFreq,
+                        numFeats, frameLen, ms_defaultUseHtkMethod))
+        {}
+
+        Wav2LetterMFCC()  = delete;
+        ~Wav2LetterMFCC() = default;
+
+    protected:
+
+        /**
+         * @brief       Overrides base class implementation of this function.
+         * @param[in]   fftVec                  Vector populated with FFT magnitudes
+         * @param[in]   melFilterBank           2D Vector with filter bank weights
+         * @param[in]   filterBankFilterFirst   Vector containing the first indices of filter bank
+         *                                      to be used for each bin.
+         * @param[in]   filterBankFilterLast    Vector containing the last indices of filter bank
+         *                                      to be used for each bin.
+         * @param[out]  melEnergies             Pre-allocated vector of MEL energies to be
+         *                                      populated.
+         * @return      true if successful, false otherwise
+         */
+        bool ApplyMelFilterBank(
+            std::vector<float>&                 fftVec,
+            std::vector<std::vector<float>>&    melFilterBank,
+            std::vector<int32_t>&               filterBankFilterFirst,
+            std::vector<int32_t>&               filterBankFilterLast,
+            std::vector<float>&                 melEnergies) override;
+
+        /**
+         * @brief           Override for the base class implementation that
+         *                  converts mel energies to a logarithmic scale. The
+         *                  difference from the default behaviour is that the
+         *                  power is converted to dB and subsequently clamped.
+         * @param[in,out]   melEnergies   1D vector of Mel energies
+         **/
+        void ConvertToLogarithmicScale(std::vector<float>& melEnergies) override;
+
+        /**
+         * @brief       Create a matrix used to calculate Discrete Cosine
+         *              Transform. Override for the base class' default
+         *              implementation as the first and last elements
+         *              use a different normaliser.
+         * @param[in]   inputLength        input length of the buffer on which
+         *                                 DCT will be performed
+         * @param[in]   coefficientCount   Total coefficients per input length.
+         * @return      1D vector with inputLength x coefficientCount elements
+         *              populated with DCT coefficients.
+         */
+        std::vector<float> CreateDCTMatrix(int32_t inputLength,
+                                           int32_t coefficientCount) override;
+
+        /**
+         * @brief       Given the low and high Mel values, get the normaliser
+         *              for weights to be applied when populating the filter
+         *              bank. Override for the base class implementation.
+         * @param[in]   leftMel        Low Mel frequency value.
+         * @param[in]   rightMel       High Mel frequency value.
+         * @param[in]   useHTKMethod   bool to signal if HTK method is to be
+         *                             used for calculation.
+         * @return      Value to use for normalising.
+         */
+        float GetMelFilterBankNormaliser(const float&   leftMel,
+                                         const float&   rightMel,
+                                         bool     useHTKMethod) override;
+    };
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* ASR_WAV2LETTER_MFCC_HPP */
\ No newline at end of file
diff --git a/source/use_case/asr/include/Wav2LetterModel.hpp b/source/use_case/asr/include/Wav2LetterModel.hpp
new file mode 100644
index 0000000..b801e10
--- /dev/null
+++ b/source/use_case/asr/include/Wav2LetterModel.hpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ASR_WAV2LETTER_MODEL_HPP
+#define ASR_WAV2LETTER_MODEL_HPP
+
+#include "Model.hpp"
+
+extern const int g_FrameLength;
+extern const int g_FrameStride;
+extern const float g_ScoreThreshold;
+extern const int g_ctxLen;
+
+namespace arm {
+namespace app {
+
+    class Wav2LetterModel : public Model {
+
+    public:
+        /* Indices for the expected model - based on input and output tensor shapes */
+        static constexpr uint32_t ms_inputRowsIdx  = 1;
+        static constexpr uint32_t ms_inputColsIdx  = 2;
+        static constexpr uint32_t ms_outputRowsIdx = 2;
+        static constexpr uint32_t ms_outputColsIdx = 3;
+
+    protected:
+        /** @brief   Gets the reference to op resolver interface class. */
+        const tflite::MicroOpResolver& GetOpResolver() override;
+
+        /** @brief   Adds operations to the op resolver instance. */
+        bool EnlistOperations() override;
+
+        const uint8_t* ModelPointer() override;
+
+        size_t ModelSize() override;
+
+    private:
+        /* Maximum number of individual operations that can be enlisted. */
+        static constexpr int _ms_maxOpCnt = 5;
+
+        /* A mutable op resolver instance. */
+        tflite::MicroMutableOpResolver<_ms_maxOpCnt> _m_opResolver;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* ASR_WAV2LETTER_MODEL_HPP */
diff --git a/source/use_case/asr/include/Wav2LetterPostprocess.hpp b/source/use_case/asr/include/Wav2LetterPostprocess.hpp
new file mode 100644
index 0000000..69567a3
--- /dev/null
+++ b/source/use_case/asr/include/Wav2LetterPostprocess.hpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ASR_WAV2LETTER_POSTPROCESS_HPP
+#define ASR_WAV2LETTER_POSTPROCESS_HPP
+
+#include "TensorFlowLiteMicro.hpp" /* TensorFlow headers. */
+#include "hal.h"    /* stdout facility. */
+
+namespace arm {
+namespace app {
+namespace audio {
+namespace asr {
+
+    /**
+     * @brief   Helper class to manage tensor post-processing for "wav2letter"
+     *          output.
+     */
+    class Postprocess {
+    public:
+        /**
+         * @brief       Constructor.
+         * @param[in]   contextLen      Left and right context length for
+         *                              the output tensor.
+         * @param[in]   innerLen        Length of the section between the
+         *                              left and right contexts.
+         * @param[in]   blankTokenIdx   Index of the blank token in the labels.
+         **/
+        Postprocess(uint32_t contextLen,
+                    uint32_t innerLen,
+                    uint32_t blankTokenIdx);
+
+        Postprocess() = delete;
+        ~Postprocess() = default;
+
+        /**
+         * @brief       Erases the required part of the tensor based
+         *              on context lengths set up during initialisation.
+         * @param[in]   tensor          Pointer to the tensor.
+         * @param[in]   axisIdx         Index of the axis on which erase is
+         *                              performed.
+         * @param[in]   lastIteration   Flag to signal this is the
+         *                              last iteration in which case
+         *                              the right context is preserved.
+         * @return      true if successful, false otherwise.
+         */
+        bool Invoke(TfLiteTensor*  tensor,
+                    uint32_t axisIdx,
+                    bool lastIteration = false);
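+
+        /* Rough usage sketch (illustrative; actual values and the axis index come
+         * from the use-case handler):
+         *
+         *   Postprocess postp(outputCtxLen, outputInnerLen, blankTokenIdx);
+         *   postp.Invoke(outputTensor, Wav2LetterModel::ms_outputRowsIdx, isLastWindow);
+         */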
+
+    private:
+        uint32_t    _m_contextLen;      /* Length of the left and right contexts. */
+        uint32_t    _m_innerLen;        /* Length of the inner context. */
+        uint32_t    _m_totalLen;        /* Total length of the required axis. */
+        uint32_t    _m_countIterations; /* Current number of iterations. */
+        uint32_t    _m_blankTokenIdx;   /* Index of the blank token in the labels. */
+
+        /**
+         * @brief       Checks if the tensor and axis index are valid
+         *              inputs to the object - based on how it has been
+         *              initialised.
+         * @return      true if valid, false otherwise.
+         */
+        bool _IsInputValid(TfLiteTensor*  tensor,
+                           uint32_t axisIdx) const;
+
+        /**
+         * @brief       Gets the tensor data element size in bytes based
+         *              on the tensor type.
+         * @return      Size in bytes, 0 if not supported.
+         */
+        uint32_t _GetTensorElementSize(TfLiteTensor* tensor);
+
+        /**
+         * @brief       Erases sections from the data assuming row-wise
+         *              arrangement along the context axis.
+         * @return      true if successful, false otherwise.
+         */
+        bool _EraseSectionsRowWise(uint8_t* ptrData,
+                                   uint32_t strideSzBytes,
+                                   bool lastIteration);
+
+        /**
+         * @brief       Erases sections from the data assuming col-wise
+         *              arrangement along the context axis.
+         * @return      true if successful, false otherwise.
+         */
+        bool _EraseSectionsColWise(uint8_t* ptrData,
+                                   uint32_t strideSzBytes,
+                                   bool lastIteration);
+    };
+
+} /* namespace asr */
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* ASR_WAV2LETTER_POSTPROCESS_HPP */
\ No newline at end of file
diff --git a/source/use_case/asr/include/Wav2LetterPreprocess.hpp b/source/use_case/asr/include/Wav2LetterPreprocess.hpp
new file mode 100644
index 0000000..8a4e0b7
--- /dev/null
+++ b/source/use_case/asr/include/Wav2LetterPreprocess.hpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ASR_WAV2LETTER_PREPROCESS_HPP
+#define ASR_WAV2LETTER_PREPROCESS_HPP
+
+#include "Wav2LetterModel.hpp"
+#include "Wav2LetterMfcc.hpp"
+#include "AudioUtils.hpp"
+#include "DataStructures.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+namespace asr {
+
+    /* Class to facilitate pre-processing calculation for Wav2Letter model
+     * for ASR. */
+    using AudioWindow = SlidingWindow <const int16_t>;
+
+    class Preprocess {
+    public:
+        /**
+         * @brief       Constructor.
+         * @param[in]   numMfccFeatures   Number of MFCC features per window.
+         * @param[in]   windowLen         Number of elements in a window.
+         * @param[in]   windowStride      Stride (in number of elements) for
+         *                                moving the window.
+         * @param[in]   numMfccVectors    Number of MFCC feature vectors to be computed (one per audio window).
+         */
+        Preprocess(
+            uint32_t  numMfccFeatures,
+            uint32_t  windowLen,
+            uint32_t  windowStride,
+            uint32_t  numMfccVectors);
+        Preprocess() = delete;
+        ~Preprocess() = default;
+
+        /**
+         * @brief       Calculates the features required from audio data. This
+         *              includes MFCC, first and second order deltas,
+         *              normalisation and finally, quantisation. The tensor is
+         *              populated with features from a given window, placed
+         *              along a single row.
+         * @param[in]   audioData      Pointer to the first element of audio data.
+         * @param[in]   audioDataLen   Number of elements in the audio data.
+         * @param[in]   tensor         Tensor to be populated.
+         * @return      true if successful, false in case of error.
+         */
+        bool Invoke(const int16_t * audioData,
+                    uint32_t  audioDataLen,
+                    TfLiteTensor *  tensor);
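+
+        /* Rough usage sketch (illustrative; the real set-up lives in the use-case
+         * MainLoop/handler):
+         *
+         *   Preprocess prep(numMfccFeatures, frameLen, frameStride, numFeatVectors);
+         *   prep.Invoke(audioWindow, audioWindowLen, model.GetInputTensor(0));
+         */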
+
+    protected:
+         /**
+          * @brief Computes the first and second order deltas for the
+          *        MFCC buffers - they are assumed to be populated.
+          *
+          * @param[in]  mfcc     MFCC buffers.
+          * @param[out] delta1   Result of the first diff computation.
+          * @param[out] delta2   Result of the second diff computation.
+          * @return     true if successful, false otherwise.
+          */
+         static bool _ComputeDeltas(Array2d<float>& mfcc,
+                                    Array2d<float>& delta1,
+                                    Array2d<float>& delta2);
+
+        /**
+         * @brief       Given a 2D vector of floats, computes the mean.
+         * @param[in]   vec   Vector of vector of floats.
+         * @return      Mean value.
+         */
+        static float _GetMean(Array2d<float>& vec);
+
+        /**
+         * @brief       Given a 2D vector of floats, computes the stddev.
+         * @param[in]   vec    Vector of vector of floats.
+         * @param[in]   mean   Mean value of the vector passed in.
+         * @return      stddev value.
+         */
+        static float _GetStdDev(Array2d<float>& vec,
+                                float mean);
+
+        /**
+         * @brief           Given a 2D vector of floats, normalises it using
+         *                  the mean and the stddev.
+         * @param[in,out]   vec   Vector of vector of floats.
+         */
+        static void _NormaliseVec(Array2d<float>& vec);
+
+        /**
+         * @brief   Normalises the MFCC and delta buffers.
+         */
+        void _Normalise();
+
+        /**
+         * @brief       Given the quantisation and data type limits, computes
+         *              the quantised values of a floating point input data.
+         * @param[in]   elem          Element to be quantised.
+         * @param[in]   quantScale    Scale.
+         * @param[in]   quantOffset   Offset.
+         * @param[in]   minVal        Numerical limit - minimum.
+         * @param[in]   maxVal        Numerical limit - maximum.
+         * @return      Floating point quantised value.
+         */
+        static float _GetQuantElem(
+                float     elem,
+                float     quantScale,
+                int       quantOffset,
+                float     minVal,
+                float     maxVal);
+
+        /**
+         * @brief       Quantises the MFCC and delta buffers, and places them
+         *              in the output buffer. While doing so, it transposes
+         *              the data: buffers in this class are arranged with the
+         *              "time" axis as the row-major dimension, primarily for
+         *              the convolution speed-up (contiguous memory access).
+         *              The output, however, requires the time axis to be in
+         *              column-major arrangement.
+         * @param[in]   outputBuf     Pointer to the output buffer.
+         * @param[in]   outputBufSz   Output buffer's size.
+         * @param[in]   quantScale    Quantisation scale.
+         * @param[in]   quantOffset   Quantisation offset.
+         * @return      true if successful, false otherwise.
+         */
+        template <typename T>
+        bool _Quantise(
+                T *             outputBuf,
+                const uint32_t  outputBufSz,
+                const float     quantScale,
+                const int       quantOffset)
+        {
+            /* Check the output size will fit everything. */
+            if (outputBufSz < (this->_m_mfccBuf.size(0) * 3 * sizeof(T))) {
+                printf_err("Tensor size too small for features\n");
+                return false;
+            }
+
+            /* Populate. */
+            T * outputBufMfcc = outputBuf;
+            T * outputBufD1 = outputBuf + this->_m_numMfccFeats;
+            T * outputBufD2 = outputBufD1 + this->_m_numMfccFeats;
+            const uint32_t ptrIncr = this->_m_numMfccFeats * 2;  /* (3 vectors - 1 vector) */
+
+            const float minVal = std::numeric_limits<T>::min();
+            const float maxVal = std::numeric_limits<T>::max();
+
+            /* Need to transpose while copying and concatenating the tensor. */
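+            /* Resulting layout, with N = _m_numMfccFeats: each feature vector j occupies a
+             * contiguous block of 3*N values -> [ mfcc(0..N-1, j), delta1(0..N-1, j),
+             * delta2(0..N-1, j) ] - hence the 2*N pointer increment after each inner loop. */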
+            for (uint32_t j = 0; j < this->_m_numFeatVectors; ++j) {
+                for (uint32_t i = 0; i < this->_m_numMfccFeats; ++i) {
+                    *outputBufMfcc++ = static_cast<T>(Preprocess::_GetQuantElem(
+                                        this->_m_mfccBuf(i, j), quantScale,
+                                        quantOffset, minVal, maxVal));
+                    *outputBufD1++ = static_cast<T>(Preprocess::_GetQuantElem(
+                                        this->_m_delta1Buf(i, j), quantScale,
+                                        quantOffset, minVal, maxVal));
+                    *outputBufD2++ = static_cast<T>(Preprocess::_GetQuantElem(
+                                        this->_m_delta2Buf(i, j), quantScale,
+                                        quantOffset, minVal, maxVal));
+                }
+                outputBufMfcc += ptrIncr;
+                outputBufD1 += ptrIncr;
+                outputBufD2 += ptrIncr;
+            }
+
+            return true;
+        }
+
+    private:
+        Wav2LetterMFCC      _m_mfcc;            /* MFCC instance. */
+
+        /* Actual buffers to be populated. */
+        Array2d<float>      _m_mfccBuf;         /* Contiguous buffer 1D: MFCC */
+        Array2d<float>      _m_delta1Buf;       /* Contiguous buffer 1D: Delta 1 */
+        Array2d<float>      _m_delta2Buf;       /* Contiguous buffer 1D: Delta 2 */
+
+        uint32_t            _m_windowLen;       /* Window length for MFCC. */
+        uint32_t            _m_windowStride;    /* Window stride len for MFCC. */
+        uint32_t            _m_numMfccFeats;    /* Number of MFCC features per window. */
+        uint32_t            _m_numFeatVectors;  /* Number of MFCC feature vectors. */
+        AudioWindow         _m_window;          /* Sliding window. */
+
+    };
+
+} /* namespace asr */
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* ASR_WAV2LETTER_PREPROCESS_HPP */
\ No newline at end of file
diff --git a/source/use_case/asr/src/AsrClassifier.cc b/source/use_case/asr/src/AsrClassifier.cc
new file mode 100644
index 0000000..7377d30
--- /dev/null
+++ b/source/use_case/asr/src/AsrClassifier.cc
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AsrClassifier.hpp"
+
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "Wav2LetterModel.hpp"
+
+template<typename T>
+bool arm::app::AsrClassifier::_GetTopResults(TfLiteTensor* tensor,
+                            std::vector<ClassificationResult>& vecResults,
+                            const std::vector <std::string>& labels, double scale, double zeroPoint)
+{
+    const uint32_t nElems = tensor->dims->data[arm::app::Wav2LetterModel::ms_outputRowsIdx];
+    const uint32_t nLetters = tensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx];
+
+    /* NOTE: tensor's size verification against labels should be
+     *       checked by the calling/public function. */
+    if (nLetters < 1) {
+        return false;
+    }
+
+    /* Final results' container. */
+    vecResults = std::vector<ClassificationResult>(nElems);
+
+    T* tensorData = tflite::GetTensorData<T>(tensor);
+
+    /* Get the top 1 results. */
+    for (uint32_t i = 0, row = 0; i < nElems; ++i, row+=nLetters) {
+        std::pair<T, uint32_t> top_1 = std::make_pair(tensorData[row + 0], 0);
+
+        for (uint32_t j = 1; j < nLetters; ++j) {
+            if (top_1.first < tensorData[row + j]) {
+                top_1.first = tensorData[row + j];
+                top_1.second = j;
+            }
+        }
+
+        double score = static_cast<int> (top_1.first);
+        vecResults[i].m_normalisedVal = scale * (score - zeroPoint);
+        vecResults[i].m_label = labels[top_1.second];
+        vecResults[i].m_labelIdx = top_1.second;
+    }
+
+    return true;
+}
+template bool arm::app::AsrClassifier::_GetTopResults<uint8_t>(TfLiteTensor* tensor,
+                            std::vector<ClassificationResult>& vecResults,
+                            const std::vector <std::string>& labels, double scale, double zeroPoint);
+template bool arm::app::AsrClassifier::_GetTopResults<int8_t>(TfLiteTensor* tensor,
+                            std::vector<ClassificationResult>& vecResults,
+                            const std::vector <std::string>& labels, double scale, double zeroPoint);
+
+bool arm::app::AsrClassifier::GetClassificationResults(
+            TfLiteTensor* outputTensor,
+            std::vector<ClassificationResult>& vecResults,
+            const std::vector <std::string>& labels, uint32_t topNCount)
+{
+        vecResults.clear();
+
+        constexpr int minTensorDims = static_cast<int>(
+            (arm::app::Wav2LetterModel::ms_outputRowsIdx > arm::app::Wav2LetterModel::ms_outputColsIdx)?
+             arm::app::Wav2LetterModel::ms_outputRowsIdx : arm::app::Wav2LetterModel::ms_outputColsIdx);
+
+        constexpr uint32_t outColsIdx = arm::app::Wav2LetterModel::ms_outputColsIdx;
+
+        /* Sanity checks. */
+        if (outputTensor == nullptr) {
+            printf_err("Output vector is null pointer.\n");
+            return false;
+        } else if (outputTensor->dims->size < minTensorDims) {
+            printf_err("Output tensor expected to be %dD\n", minTensorDims);
+            return false;
+        } else if (static_cast<uint32_t>(outputTensor->dims->data[outColsIdx]) < topNCount) {
+            printf_err("Output vectors are smaller than %u\n", topNCount);
+            return false;
+        } else if (static_cast<uint32_t>(outputTensor->dims->data[outColsIdx]) != labels.size()) {
+            printf("Output size doesn't match the labels' size\n");
+            return false;
+        }
+
+        if (topNCount != 1) {
+            warn("TopNCount value ignored in this implementation\n");
+        }
+
+        /* To return the floating point values, we need quantization parameters. */
+        QuantParams quantParams = GetTensorQuantParams(outputTensor);
+
+        bool resultState;
+
+        switch (outputTensor->type) {
+            case kTfLiteUInt8:
+                resultState = this->_GetTopResults<uint8_t>(
+                                        outputTensor, vecResults,
+                                        labels, quantParams.scale,
+                                        quantParams.offset);
+                break;
+            case kTfLiteInt8:
+                resultState = this->_GetTopResults<int8_t>(
+                                        outputTensor, vecResults,
+                                        labels, quantParams.scale,
+                                        quantParams.offset);
+                break;
+            default:
+                printf_err("Tensor type %s not supported by classifier\n",
+                    TfLiteTypeGetName(outputTensor->type));
+                return false;
+        }
+
+        if (!resultState) {
+            printf_err("Failed to get sorted set\n");
+            return false;
+        }
+
+        return true;
+}
\ No newline at end of file
diff --git a/source/use_case/asr/src/MainLoop.cc b/source/use_case/asr/src/MainLoop.cc
new file mode 100644
index 0000000..ca777be
--- /dev/null
+++ b/source/use_case/asr/src/MainLoop.cc
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"                     /* Brings in platform definitions. */
+#include "Labels.hpp"                /* For label strings. */
+#include "UseCaseHandler.hpp"        /* Handlers for different user options. */
+#include "Wav2LetterModel.hpp"       /* Model class for running inference. */
+#include "UseCaseCommonUtils.hpp"    /* Utils functions. */
+#include "AsrClassifier.hpp"         /* Classifier. */
+#include "InputFiles.hpp"            /* Generated audio clip header. */
+#include "Wav2LetterPreprocess.hpp"  /* Pre-processing class. */
+#include "Wav2LetterPostprocess.hpp" /* Post-processing class. */
+
+enum opcodes
+{
+    MENU_OPT_RUN_INF_NEXT = 1,       /* Run on next vector. */
+    MENU_OPT_RUN_INF_CHOSEN,         /* Run on a user provided vector index. */
+    MENU_OPT_RUN_INF_ALL,            /* Run inference on all. */
+    MENU_OPT_SHOW_MODEL_INFO,        /* Show model info. */
+    MENU_OPT_LIST_AUDIO_CLIPS        /* List the current baked audio clips. */
+};
+
+static void DisplayMenu()
+{
+    printf("\n\nUser input required\n");
+    printf("Enter option number from:\n\n");
+    printf("  %u. Classify next audio clip\n", MENU_OPT_RUN_INF_NEXT);
+    printf("  %u. Classify audio clip at chosen index\n", MENU_OPT_RUN_INF_CHOSEN);
+    printf("  %u. Run classification on all audio clips\n", MENU_OPT_RUN_INF_ALL);
+    printf("  %u. Show NN model info\n", MENU_OPT_SHOW_MODEL_INFO);
+    printf("  %u. List audio clips\n\n", MENU_OPT_LIST_AUDIO_CLIPS);
+    printf("  Choice: ");
+}
+
+/** @brief Verifies that the input and output tensors have the required minimum dimensions. */
+static bool VerifyTensorDimensions(const arm::app::Model& model);
+
+/** @brief Gets the number of MFCC features for a single window. */
+static uint32_t GetNumMfccFeatures(const arm::app::Model& model);
+
+/** @brief Gets the number of MFCC feature vectors to be computed. */
+static uint32_t GetNumMfccFeatureVectors(const arm::app::Model& model);
+
+/** @brief Gets the output context length (left and right) for post-processing. */
+static uint32_t GetOutputContextLen(const arm::app::Model& model,
+                                    uint32_t inputCtxLen);
+
+/** @brief Gets the output inner length for post-processing. */
+static uint32_t GetOutputInnerLen(const arm::app::Model& model,
+                                  uint32_t outputCtxLen);
+
+void main_loop(hal_platform& platform)
+{
+    arm::app::Wav2LetterModel model;  /* Model wrapper object. */
+
+    /* Load the model. */
+    if (!model.Init()) {
+        printf_err("Failed to initialise model\n");
+        return;
+    } else if (!VerifyTensorDimensions(model)) {
+        printf_err("Model's input or output dimension verification failed\n");
+        return;
+    }
+
+    /* Initialise pre-processing. */
+    arm::app::audio::asr::Preprocess prep(
+                                GetNumMfccFeatures(model),
+                                g_FrameLength,
+                                g_FrameStride,
+                                GetNumMfccFeatureVectors(model));
+
+    /* Initialise post-processing. */
+    const uint32_t outputCtxLen = GetOutputContextLen(model, g_ctxLen);
+    const uint32_t blankTokenIdx = 28;
+    arm::app::audio::asr::Postprocess postp(
+                                outputCtxLen,
+                                GetOutputInnerLen(model, outputCtxLen),
+                                blankTokenIdx);
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+    std::vector <std::string> labels;
+    GetLabelsVector(labels);
+    arm::app::AsrClassifier classifier;  /* Classifier wrapper object. */
+
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("model", model);
+    caseContext.Set<uint32_t>("clipIndex", 0);
+    caseContext.Set<uint32_t>("frameLength", g_FrameLength);
+    caseContext.Set<uint32_t>("frameStride", g_FrameStride);
+    caseContext.Set<float>("scoreThreshold", g_ScoreThreshold);  /* Score threshold. */
+    caseContext.Set<uint32_t>("ctxLen", g_ctxLen);  /* Left and right context length (MFCC feat vectors). */
+    caseContext.Set<const std::vector <std::string>&>("labels", labels);
+    caseContext.Set<arm::app::AsrClassifier&>("classifier", classifier);
+    caseContext.Set<arm::app::audio::asr::Preprocess&>("preprocess", prep);
+    caseContext.Set<arm::app::audio::asr::Postprocess&>("postprocess", postp);
+
+    bool executionSuccessful = true;
+    constexpr bool bUseMenu = NUMBER_OF_FILES > 1 ? true : false;
+
+    /* Loop. */
+    do {
+        int menuOption = MENU_OPT_RUN_INF_NEXT;
+        if (bUseMenu) {
+            DisplayMenu();
+            menuOption = arm::app::ReadUserInputAsInt(platform);
+            printf("\n");
+        }
+        switch (menuOption) {
+            case MENU_OPT_RUN_INF_NEXT:
+                executionSuccessful = ClassifyAudioHandler(
+                                        caseContext,
+                                        caseContext.Get<uint32_t>("clipIndex"),
+                                        false);
+                break;
+            case MENU_OPT_RUN_INF_CHOSEN: {
+                printf("    Enter the audio clip index [0, %d]: ",
+                       NUMBER_OF_FILES-1);
+                auto clipIndex = static_cast<uint32_t>(
+                                    arm::app::ReadUserInputAsInt(platform));
+                executionSuccessful = ClassifyAudioHandler(caseContext,
+                                                           clipIndex,
+                                                           false);
+                break;
+            }
+            case MENU_OPT_RUN_INF_ALL:
+                executionSuccessful = ClassifyAudioHandler(
+                                        caseContext,
+                                        caseContext.Get<uint32_t>("clipIndex"),
+                                        true);
+                break;
+            case MENU_OPT_SHOW_MODEL_INFO:
+                executionSuccessful = model.ShowModelInfoHandler();
+                break;
+            case MENU_OPT_LIST_AUDIO_CLIPS:
+                executionSuccessful = ListFilesHandler(caseContext);
+                break;
+            default:
+                printf("Incorrect choice, try again.");
+                break;
+        }
+    } while (executionSuccessful && bUseMenu);
+    info("Main loop terminated.\n");
+}
+
+static bool VerifyTensorDimensions(const arm::app::Model& model)
+{
+    /* Populate tensor related parameters. */
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    if (!inputTensor->dims) {
+        printf_err("Invalid input tensor dims\n");
+        return false;
+    } else if (inputTensor->dims->size < 3) {
+        printf_err("Input tensor dimension should be >= 3\n");
+        return false;
+    }
+
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+    if (!outputTensor->dims) {
+        printf_err("Invalid output tensor dims\n");
+        return false;
+    } else if (outputTensor->dims->size < 3) {
+        printf_err("Output tensor dimension should be >= 3\n");
+        return false;
+    }
+
+    return true;
+}
+
+static uint32_t GetNumMfccFeatures(const arm::app::Model& model)
+{
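+    /* The input tensor columns are assumed to pack the MFCC coefficients
+     * together with their first and second order deltas, hence the
+     * division by 3 below. */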
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    const int inputCols = inputTensor->dims->data[arm::app::Wav2LetterModel::ms_inputColsIdx];
+    if (0 != inputCols % 3) {
+        printf_err("Number of input columns is not a multiple of 3\n");
+    }
+    return std::max(inputCols/3, 0);
+}
+
+static uint32_t GetNumMfccFeatureVectors(const arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    const int inputRows = inputTensor->dims->data[arm::app::Wav2LetterModel::ms_inputRowsIdx];
+    return std::max(inputRows, 0);
+}
+
+static uint32_t GetOutputContextLen(const arm::app::Model& model, const uint32_t inputCtxLen)
+{
+    const uint32_t inputRows = GetNumMfccFeatureVectors(model);
+    const uint32_t inputInnerLen = inputRows - (2 * inputCtxLen);
+    constexpr uint32_t ms_outputRowsIdx = arm::app::Wav2LetterModel::ms_outputRowsIdx;
+
+    /* Check to make sure that the input tensor supports the above
+     * context and inner lengths. */
+    if (inputRows <= 2 * inputCtxLen || inputRows <= inputInnerLen) {
+        printf_err("Input rows not compatible with ctx of %u\n",
+            inputCtxLen);
+        return 0;
+    }
+
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+    const uint32_t outputRows = std::max(outputTensor->dims->data[ms_outputRowsIdx], 0);
+
+    const float tensorColRatio = static_cast<float>(inputRows)/
+                                     static_cast<float>(outputRows);
+
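+    /* Illustration (not tied to a specific model): with 296 input feature
+     * vectors, 148 output rows and an input context of 98, the ratio is 2
+     * and the returned output context length is round(98 / 2) = 49. */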
+    return std::round(static_cast<float>(inputCtxLen)/tensorColRatio);
+}
+
+static uint32_t GetOutputInnerLen(const arm::app::Model& model,
+                                  const uint32_t outputCtxLen)
+{
+    constexpr uint32_t ms_outputRowsIdx = arm::app::Wav2LetterModel::ms_outputRowsIdx;
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+    const uint32_t outputRows = std::max(outputTensor->dims->data[ms_outputRowsIdx], 0);
+    return (outputRows - (2 * outputCtxLen));
+}
diff --git a/source/use_case/asr/src/OutputDecode.cc b/source/use_case/asr/src/OutputDecode.cc
new file mode 100644
index 0000000..41fbe07
--- /dev/null
+++ b/source/use_case/asr/src/OutputDecode.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "OutputDecode.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+namespace asr {
+
+    std::string DecodeOutput(const std::vector<ClassificationResult>& vecResults)
+    {
+        std::string CleanOutputBuffer;
+
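+        /* Greedy CTC-style decode: collapse runs of identical labels and
+         * drop the '$' blank/separator label. For example, the label
+         * sequence "h h e $ l l $ l o" decodes to "hello". */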
+        for (size_t i = 0; i < vecResults.size(); ++i)  /* For all elements in vector. */
+        {
+            while (i+1 < vecResults.size() &&
+                   vecResults[i].m_label == vecResults[i+1].m_label)  /* While the current element is equal to the next, ignore it and move on. */
+            {
+                ++i;
+            }
+            if (vecResults[i].m_label != "$")  /* $ is a character used to represent unknown and double characters so should not be in output. */
+            {
+                CleanOutputBuffer += vecResults[i].m_label;  /* If the element is different to the next, it will be appended to CleanOutputBuffer. */
+            }
+        }
+
+        return CleanOutputBuffer;  /* Return string type containing clean output. */
+    }
+
+} /* namespace asr */
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/asr/src/UseCaseHandler.cc b/source/use_case/asr/src/UseCaseHandler.cc
new file mode 100644
index 0000000..e706eb8
--- /dev/null
+++ b/source/use_case/asr/src/UseCaseHandler.cc
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "UseCaseHandler.hpp"
+
+#include "InputFiles.hpp"
+#include "AsrClassifier.hpp"
+#include "Wav2LetterModel.hpp"
+#include "hal.h"
+#include "Wav2LetterMfcc.hpp"
+#include "AudioUtils.hpp"
+#include "UseCaseCommonUtils.hpp"
+#include "AsrResult.hpp"
+#include "Wav2LetterPreprocess.hpp"
+#include "Wav2LetterPostprocess.hpp"
+#include "OutputDecode.hpp"
+
+namespace arm {
+namespace app {
+
+    /**
+    * @brief           Helper function to increment current audio clip index.
+    * @param[in,out]   ctx   Reference to the application context object.
+    **/
+    static void _IncrementAppCtxClipIdx(ApplicationContext& ctx);
+
+    /**
+     * @brief           Helper function to set the audio clip index.
+     * @param[in,out]   ctx   Reference to the application context object.
+     * @param[in]       idx   Value to be set.
+     * @return          true if index is set, false otherwise.
+     **/
+    static bool _SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx);
+
+    /**
+     * @brief           Presents inference results using the data presentation
+     *                  object.
+     * @param[in]       platform    Reference to the hal platform object.
+     * @param[in]       results     Vector of classification results to be displayed.
+     * @return          true if successful, false otherwise.
+     **/
+    static bool _PresentInferenceResult(
+                    hal_platform& platform,
+                    const std::vector<arm::app::asr::AsrResult>& results);
+
+    /* Audio inference classification handler. */
+    bool ClassifyAudioHandler(ApplicationContext& ctx, uint32_t clipIndex, bool runAll)
+    {
+        constexpr uint32_t dataPsnTxtInfStartX = 20;
+        constexpr uint32_t dataPsnTxtInfStartY = 40;
+
+        auto& platform = ctx.Get<hal_platform&>("platform");
+        platform.data_psn->clear(COLOR_BLACK);
+
+        /* If the request has a valid size, set the audio index. */
+        if (clipIndex < NUMBER_OF_FILES) {
+            if (!_SetAppCtxClipIdx(ctx, clipIndex)) {
+                return false;
+            }
+        }
+
+        /* Get model reference. */
+        auto& model = ctx.Get<Model&>("model");
+        if (!model.IsInited()) {
+            printf_err("Model is not initialised! Terminating processing.\n");
+            return false;
+        }
+
+        /* Get score threshold to be applied for the classifier (post-inference). */
+        auto scoreThreshold = ctx.Get<float>("scoreThreshold");
+
+        /* Get tensors. Dimensions of the tensors should have been verified by
+         * the caller. */
+        TfLiteTensor* inputTensor = model.GetInputTensor(0);
+        TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+        const uint32_t inputRows = inputTensor->dims->data[arm::app::Wav2LetterModel::ms_inputRowsIdx];
+
+        /* Populate MFCC related parameters. */
+        auto mfccParamsWinLen = ctx.Get<uint32_t>("frameLength");
+        auto mfccParamsWinStride = ctx.Get<uint32_t>("frameStride");
+
+        /* Populate ASR inference context and inner lengths for input. */
+        auto inputCtxLen = ctx.Get<uint32_t>("ctxLen");
+        const uint32_t inputInnerLen = inputRows - (2 * inputCtxLen);
+
+        /* Audio data stride corresponds to inputInnerLen feature vectors. */
+        const uint32_t audioParamsWinLen = (inputRows - 1) * mfccParamsWinStride + (mfccParamsWinLen);
+        const uint32_t audioParamsWinStride = inputInnerLen * mfccParamsWinStride;
+        const float audioParamsSecondsPerSample = (1.0/audio::Wav2LetterMFCC::ms_defaultSamplingFreq);
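+        /* Illustration, assuming a model expecting 296 feature vectors and
+         * the default frame length 512, frame stride 160 and context 98:
+         * the window is (296 - 1) * 160 + 512 = 47712 samples and the
+         * stride is (296 - 196) * 160 = 16000 samples, i.e. one second of
+         * audio at 16 kHz. */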
+
+        /* Get pre/post-processing objects. */
+        auto& prep = ctx.Get<audio::asr::Preprocess&>("preprocess");
+        auto& postp = ctx.Get<audio::asr::Postprocess&>("postprocess");
+
+        /* Set default reduction axis for post-processing. */
+        const uint32_t reductionAxis = arm::app::Wav2LetterModel::ms_outputRowsIdx;
+
+        /* Audio clip start index. */
+        auto startClipIdx = ctx.Get<uint32_t>("clipIndex");
+
+        /* Loop to process audio clips. */
+        do {
+            /* Get current audio clip index. */
+            auto currentIndex = ctx.Get<uint32_t>("clipIndex");
+
+            /* Get the current audio buffer and respective size. */
+            const int16_t* audioArr = get_audio_array(currentIndex);
+            const uint32_t audioArrSize = get_audio_array_size(currentIndex);
+
+            if (!audioArr) {
+                printf_err("Invalid audio array pointer\n");
+                return false;
+            }
+
+            /* Audio clip must have enough samples to produce 1 MFCC feature. */
+            if (audioArrSize < mfccParamsWinLen) {
+                printf_err("Not enough audio samples, minimum needed is %u\n", mfccParamsWinLen);
+                return false;
+            }
+
+            /* Initialise an audio slider. */
+            auto audioDataSlider = audio::ASRSlidingWindow<const int16_t>(
+                                        audioArr,
+                                        audioArrSize,
+                                        audioParamsWinLen,
+                                        audioParamsWinStride);
+
+            /* Declare a container for results. */
+            std::vector<arm::app::asr::AsrResult> results;
+
+            /* Display message on the LCD - inference running. */
+            std::string str_inf{"Running inference... "};
+            platform.data_psn->present_data_text(
+                                str_inf.c_str(), str_inf.size(),
+                                dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+            info("Running inference on audio clip %u => %s\n", currentIndex,
+                 get_filename(currentIndex));
+
+            size_t inferenceWindowLen = audioParamsWinLen;
+
+            /* Start sliding through audio clip. */
+            while (audioDataSlider.HasNext()) {
+
+                /* If not enough audio see how much can be sent for processing. */
+                size_t nextStartIndex = audioDataSlider.NextWindowStartIndex();
+                if (nextStartIndex + audioParamsWinLen > audioArrSize) {
+                    inferenceWindowLen = audioArrSize - nextStartIndex;
+                }
+
+                const int16_t* inferenceWindow = audioDataSlider.Next();
+
+                info("Inference %zu/%zu\n", audioDataSlider.Index() + 1,
+                     static_cast<size_t>(ceilf(audioDataSlider.FractionalTotalStrides() + 1)));
+
+                Profiler prepProfiler{&platform, "pre-processing"};
+                prepProfiler.StartProfiling();
+
+                /* Calculate MFCCs, deltas and populate the input tensor. */
+                prep.Invoke(inferenceWindow, inferenceWindowLen, inputTensor);
+
+                prepProfiler.StopProfiling();
+                std::string prepProfileResults = prepProfiler.GetResultsAndReset();
+                info("%s\n", prepProfileResults.c_str());
+
+                /* Run inference over this audio clip sliding window. */
+                arm::app::RunInference(platform, model);
+
+                /* Post-process. */
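+                /* Post-processing masks the overlapping left/right context
+                 * rows of the output (writing the blank token into them),
+                 * except at the clip boundaries, so each window contributes
+                 * only its inner section to the final transcription. */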
+                postp.Invoke(outputTensor, reductionAxis, !audioDataSlider.HasNext());
+
+                /* Get results. */
+                std::vector<ClassificationResult> classificationResult;
+                auto& classifier = ctx.Get<AsrClassifier&>("classifier");
+                classifier.GetClassificationResults(
+                            outputTensor, classificationResult,
+                            ctx.Get<std::vector<std::string>&>("labels"), 1);
+
+                results.emplace_back(asr::AsrResult(classificationResult,
+                                                    (audioDataSlider.Index() *
+                                                    audioParamsSecondsPerSample *
+                                                    audioParamsWinStride),
+                                                    audioDataSlider.Index(), scoreThreshold));
+
+#if VERIFY_TEST_OUTPUT
+                arm::app::DumpTensor(outputTensor,
+                    outputTensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx]);
+#endif /* VERIFY_TEST_OUTPUT */
+
+            }
+
+            /* Erase. */
+            str_inf = std::string(str_inf.size(), ' ');
+            platform.data_psn->present_data_text(
+                                str_inf.c_str(), str_inf.size(),
+                                dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+            ctx.Set<std::vector<arm::app::asr::AsrResult>>("results", results);
+
+            if (!_PresentInferenceResult(platform, results)) {
+                return false;
+            }
+
+            _IncrementAppCtxClipIdx(ctx);
+
+        } while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);
+
+        return true;
+    }
+
+    static void _IncrementAppCtxClipIdx(ApplicationContext& ctx)
+    {
+        auto curAudioIdx = ctx.Get<uint32_t>("clipIndex");
+
+        if (curAudioIdx + 1 >= NUMBER_OF_FILES) {
+            ctx.Set<uint32_t>("clipIndex", 0);
+            return;
+        }
+        ++curAudioIdx;
+        ctx.Set<uint32_t>("clipIndex", curAudioIdx);
+    }
+
+    static bool _SetAppCtxClipIdx(ApplicationContext& ctx, const uint32_t idx)
+    {
+        if (idx >= NUMBER_OF_FILES) {
+            printf_err("Invalid idx %u (expected less than %u)\n",
+                       idx, NUMBER_OF_FILES);
+            return false;
+        }
+
+        ctx.Set<uint32_t>("clipIndex", idx);
+        return true;
+    }
+
+    static bool _PresentInferenceResult(hal_platform& platform,
+                                        const std::vector<arm::app::asr::AsrResult>& results)
+    {
+        constexpr uint32_t dataPsnTxtStartX1 = 20;
+        constexpr uint32_t dataPsnTxtStartY1 = 60;
+        constexpr bool allow_multiple_lines = true;
+
+        platform.data_psn->set_text_color(COLOR_GREEN);
+
+        /* Results from multiple inferences should be combined before processing. */
+        std::vector<arm::app::ClassificationResult> combinedResults;
+        for (auto& result : results) {
+            combinedResults.insert(combinedResults.end(),
+                                   result.m_resultVec.begin(),
+                                   result.m_resultVec.end());
+        }
+
+        /* Get each inference result string using the decoder. */
+        for (const auto & result : results) {
+            std::string infResultStr = audio::asr::DecodeOutput(result.m_resultVec);
+
+            info("Result for inf %u: %s\n", result.m_inferenceNumber,
+                                            infResultStr.c_str());
+        }
+
+        /* Get the decoded result for the combined result. */
+        std::string finalResultStr = audio::asr::DecodeOutput(combinedResults);
+
+        platform.data_psn->present_data_text(
+                            finalResultStr.c_str(), finalResultStr.size(),
+                            dataPsnTxtStartX1, dataPsnTxtStartY1,
+                            allow_multiple_lines);
+
+        info("Final result: %s\n", finalResultStr.c_str());
+        return true;
+    }
+
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/use_case/asr/src/Wav2LetterMfcc.cc b/source/use_case/asr/src/Wav2LetterMfcc.cc
new file mode 100644
index 0000000..92c91bc
--- /dev/null
+++ b/source/use_case/asr/src/Wav2LetterMfcc.cc
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterMfcc.hpp"
+
+#include "PlatformMath.hpp"
+
+#include <cfloat>
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    bool Wav2LetterMFCC::ApplyMelFilterBank(
+            std::vector<float>&                 fftVec,
+            std::vector<std::vector<float>>&    melFilterBank,
+            std::vector<int32_t>&               filterBankFilterFirst,
+            std::vector<int32_t>&               filterBankFilterLast,
+            std::vector<float>&                 melEnergies)
+    {
+        const size_t numBanks = melEnergies.size();
+
+        if (numBanks != filterBankFilterFirst.size() ||
+                numBanks != filterBankFilterLast.size()) {
+            printf_err("Unexpected filter bank lengths\n");
+            return false;
+        }
+
+        for (size_t bin = 0; bin < numBanks; ++bin) {
+            auto filterBankIter = melFilterBank[bin].begin();
+            float melEnergy = 1e-10;  /* Avoid log of zero at later stages, same value used in librosa. */
+            const int32_t firstIndex = filterBankFilterFirst[bin];
+            const int32_t lastIndex = filterBankFilterLast[bin];
+
+            for (int32_t i = firstIndex; i <= lastIndex; ++i) {
+                melEnergy += (*filterBankIter++ * fftVec[i]);
+            }
+
+            melEnergies[bin] = melEnergy;
+        }
+
+        return true;
+    }
+
+    void Wav2LetterMFCC::ConvertToLogarithmicScale(
+                            std::vector<float>& melEnergies)
+    {
+        float maxMelEnergy = -FLT_MAX;
+
+        /* Container for natural logarithms of mel energies. */
+        std::vector <float> vecLogEnergies(melEnergies.size(), 0.f);
+
+        /* Because we are taking natural logs, we need to multiply by log10(e).
+         * Also, for wav2letter model, we scale our log10 values by 10. */
+        constexpr float multiplier = 10.0 *  /* Default scalar. */
+                                      0.4342944819032518;  /* log10f(std::exp(1.0)) */
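+        /* Equivalently: 10 * log10(x) = 10 * log10(e) * ln(x). */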
+
+        /* Take log of the whole vector. */
+        math::MathUtils::VecLogarithmF32(melEnergies, vecLogEnergies);
+
+        /* Scale the log values and get the max. */
+        for (auto iterM = melEnergies.begin(), iterL = vecLogEnergies.begin();
+                  iterM != melEnergies.end(); ++iterM, ++iterL) {
+
+            *iterM = *iterL * multiplier;
+
+            /* Save the max mel energy. */
+            if (*iterM > maxMelEnergy) {
+                maxMelEnergy = *iterM;
+            }
+        }
+
+        /* Clamp the mel energies. */
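+        /* The 80 dB limit below is assumed to mirror librosa's default
+         * dynamic range (top_db). */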
+        constexpr float maxDb = 80.0;
+        const float clampLevelLowdB = maxMelEnergy - maxDb;
+        for (auto iter = melEnergies.begin(); iter != melEnergies.end(); ++iter) {
+            *iter = std::max(*iter, clampLevelLowdB);
+        }
+    }
+
+    std::vector<float> Wav2LetterMFCC::CreateDCTMatrix(
+                                        const int32_t inputLength,
+                                        const int32_t coefficientCount)
+    {
+        std::vector<float> dctMatrix(inputLength * coefficientCount);
+
+        /* Orthonormal normalization. */
+        const float normalizerK0 = 2 * math::MathUtils::SqrtF32(1.0f /
+                                        static_cast<float>(4*inputLength));
+        const float normalizer = 2 * math::MathUtils::SqrtF32(1.0f /
+                                        static_cast<float>(2*inputLength));
+
+        const float angleIncr = M_PI / inputLength;
+        float angle = angleIncr;  /* We start using it at the k = 1 loop. */
+
+        /* First row of DCT will use normalizer K0. */
+        for (int32_t n = 0; n < inputLength; ++n) {
+            dctMatrix[n] = normalizerK0;  /* cos(0) = 1 */
+        }
+
+        /* Second row (index = 1) onwards, we use the standard normalizer. */
+        for (int32_t k = 1, m = inputLength; k < coefficientCount; ++k, m += inputLength) {
+            for (int32_t n = 0; n < inputLength; ++n) {
+                dctMatrix[m+n] = normalizer *
+                    math::MathUtils::CosineF32((n + 0.5f) * angle);
+            }
+            angle += angleIncr;
+        }
+        return dctMatrix;
+    }
+
+    float Wav2LetterMFCC::GetMelFilterBankNormaliser(
+                                    const float&    leftMel,
+                                    const float&    rightMel,
+                                    const bool      useHTKMethod)
+    {
+        /* Slaney normalization for mel weights. */
+        return (2.0f / (MFCC::InverseMelScale(rightMel, useHTKMethod) -
+                MFCC::InverseMelScale(leftMel, useHTKMethod)));
+    }
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/asr/src/Wav2LetterModel.cc b/source/use_case/asr/src/Wav2LetterModel.cc
new file mode 100644
index 0000000..5aefecd
--- /dev/null
+++ b/source/use_case/asr/src/Wav2LetterModel.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterModel.hpp"
+
+#include "hal.h"
+
+const tflite::MicroOpResolver& arm::app::Wav2LetterModel::GetOpResolver()
+{
+    return this->_m_opResolver;
+}
+
+bool arm::app::Wav2LetterModel::EnlistOperations()
+{
+    this->_m_opResolver.AddConv2D();
+    this->_m_opResolver.AddMul();
+    this->_m_opResolver.AddMaximum();
+    this->_m_opResolver.AddReshape();
+
+#if defined(ARM_NPU)
+    if (kTfLiteOk == this->_m_opResolver.AddEthosU()) {
+        info("Added %s support to op resolver\n",
+            tflite::GetString_ETHOSU());
+    } else {
+        printf_err("Failed to add Arm NPU support to op resolver.");
+        return false;
+    }
+#endif /* ARM_NPU */
+
+    return true;
+}
+
+extern uint8_t* GetModelPointer();
+const uint8_t* arm::app::Wav2LetterModel::ModelPointer()
+{
+    return GetModelPointer();
+}
+
+extern size_t GetModelLen();
+size_t arm::app::Wav2LetterModel::ModelSize()
+{
+    return GetModelLen();
+}
\ No newline at end of file
diff --git a/source/use_case/asr/src/Wav2LetterPostprocess.cc b/source/use_case/asr/src/Wav2LetterPostprocess.cc
new file mode 100644
index 0000000..60ee51e
--- /dev/null
+++ b/source/use_case/asr/src/Wav2LetterPostprocess.cc
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPostprocess.hpp"
+
+#include "Wav2LetterModel.hpp"
+
+
+namespace arm {
+namespace app {
+namespace audio {
+namespace asr {
+
+    Postprocess::Postprocess(const uint32_t contextLen,
+                             const uint32_t innerLen,
+                             const uint32_t blankTokenIdx)
+        :   _m_contextLen(contextLen),
+            _m_innerLen(innerLen),
+            _m_totalLen(2 * this->_m_contextLen + this->_m_innerLen),
+            _m_countIterations(0),
+            _m_blankTokenIdx(blankTokenIdx)
+    {}
+
+    bool Postprocess::Invoke(TfLiteTensor*  tensor,
+                            const uint32_t  axisIdx,
+                            const bool      lastIteration)
+    {
+        /* Basic checks. */
+        if (!this->_IsInputValid(tensor, axisIdx)) {
+            return false;
+        }
+
+        /* Irrespective of tensor type, we use unsigned "byte" */
+        uint8_t* ptrData = tflite::GetTensorData<uint8_t>(tensor);
+        const uint32_t elemSz = this->_GetTensorElementSize(tensor);
+
+        /* Other sanity checks. */
+        if (0 == elemSz) {
+            printf_err("Tensor type not supported for post processing\n");
+            return false;
+        } else if (elemSz * this->_m_totalLen > tensor->bytes) {
+            printf_err("Insufficient number of tensor bytes\n");
+            return false;
+        }
+
+        /* Which axis do we need to process? */
+        switch (axisIdx) {
+            case arm::app::Wav2LetterModel::ms_outputRowsIdx:
+                return this->_EraseSectionsRowWise(ptrData,
+                        elemSz * tensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx],
+                        lastIteration);
+            case arm::app::Wav2LetterModel::ms_outputColsIdx:
+                return this->_EraseSectionsColWise(ptrData,
+                        elemSz * tensor->dims->data[arm::app::Wav2LetterModel::ms_outputRowsIdx],
+                        lastIteration);
+            default:
+                printf_err("Unsupported axis index: %u\n", axisIdx);
+        }
+
+        return false;
+    }
+
+    bool Postprocess::_IsInputValid(TfLiteTensor*  tensor,
+                                    const uint32_t axisIdx) const
+    {
+        if (nullptr == tensor) {
+            return false;
+        }
+
+        if (static_cast<int>(axisIdx) >= tensor->dims->size) {
+            printf_err("Invalid axis index: %u; Max: %d\n",
+                axisIdx, tensor->dims->size);
+            return false;
+        }
+
+        if (static_cast<int>(this->_m_totalLen) !=
+                             tensor->dims->data[axisIdx]) {
+            printf_err("Unexpected tensor dimension for axis %d, \n",
+                tensor->dims->data[axisIdx]);
+            return false;
+        }
+
+        return true;
+    }
+
+    uint32_t Postprocess::_GetTensorElementSize(TfLiteTensor*  tensor)
+    {
+        switch(tensor->type) {
+            case kTfLiteUInt8:
+                return 1;
+            case kTfLiteInt8:
+                return 1;
+            case kTfLiteInt16:
+                return 2;
+            case kTfLiteInt32:
+                return 4;
+            case kTfLiteFloat32:
+                return 4;
+            default:
+                printf_err("Unsupported tensor type %s\n",
+                    TfLiteTypeGetName(tensor->type));
+        }
+
+        return 0;
+    }
+
+    bool Postprocess::_EraseSectionsRowWise(
+                        uint8_t*         ptrData,
+                        const uint32_t   strideSzBytes,
+                        const bool       lastIteration)
+    {
+        /* In this case, the "zero-ing" is quite simple as the region
+         * to be zeroed sits in contiguous memory (row-major). */
+        const uint32_t eraseLen = strideSzBytes * this->_m_contextLen;
+
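+        /* The left context is only erased from the second window onwards:
+         * the first window has no preceding overlap, so its leading rows
+         * hold genuine results and are kept as-is. */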
+        /* Erase left context? */
+        if (this->_m_countIterations > 0) {
+            /* Set output of each classification window to the blank token. */
+            std::memset(ptrData, 0, eraseLen);
+            for (size_t windowIdx = 0; windowIdx < this->_m_contextLen; windowIdx++) {
+                ptrData[windowIdx*strideSzBytes + this->_m_blankTokenIdx] = 1;
+            }
+        }
+
+        /* Erase right context? */
+        if (false == lastIteration) {
+            uint8_t * rightCtxPtr = ptrData + (strideSzBytes * (this->_m_contextLen + this->_m_innerLen));
+            /* Set output of each classification window to the blank token. */
+            std::memset(rightCtxPtr, 0, eraseLen);
+            for (size_t windowIdx = 0; windowIdx < this->_m_contextLen; windowIdx++) {
+                rightCtxPtr[windowIdx*strideSzBytes + this->_m_blankTokenIdx] = 1;
+            }
+        }
+
+        if (lastIteration) {
+            this->_m_countIterations = 0;
+        } else {
+            ++this->_m_countIterations;
+        }
+
+        return true;
+    }
+
+    bool Postprocess::_EraseSectionsColWise(
+                        uint8_t*         ptrData,
+                        const uint32_t   strideSzBytes,
+                        const bool       lastIteration)
+    {
+        /* Not implemented. */
+        UNUSED(ptrData);
+        UNUSED(strideSzBytes);
+        UNUSED(lastIteration);
+        return false;
+    }
+
+} /* namespace asr */
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/use_case/asr/src/Wav2LetterPreprocess.cc b/source/use_case/asr/src/Wav2LetterPreprocess.cc
new file mode 100644
index 0000000..e46cca3
--- /dev/null
+++ b/source/use_case/asr/src/Wav2LetterPreprocess.cc
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPreprocess.hpp"
+
+#include "PlatformMath.hpp"
+#include "TensorFlowLiteMicro.hpp"
+
+#include <algorithm>
+#include <cmath>
+
+namespace arm {
+namespace app {
+namespace audio {
+namespace asr {
+
+    Preprocess::Preprocess(
+        const uint32_t  numMfccFeatures,
+        const uint32_t  windowLen,
+        const uint32_t  windowStride,
+        const uint32_t  numMfccVectors):
+            _m_mfcc(numMfccFeatures, windowLen),
+            _m_mfccBuf(numMfccFeatures, numMfccVectors),
+            _m_delta1Buf(numMfccFeatures, numMfccVectors),
+            _m_delta2Buf(numMfccFeatures, numMfccVectors),
+            _m_windowLen(windowLen),
+            _m_windowStride(windowStride),
+            _m_numMfccFeats(numMfccFeatures),
+            _m_numFeatVectors(numMfccVectors),
+            _m_window()
+    {
+        if (numMfccFeatures > 0 && windowLen > 0) {
+            this->_m_mfcc.Init();
+        }
+    }
+
+    bool Preprocess::Invoke(
+                const int16_t*  audioData,
+                const uint32_t  audioDataLen,
+                TfLiteTensor*   tensor)
+    {
+        this->_m_window = SlidingWindow<const int16_t>(
+                            audioData, audioDataLen,
+                            this->_m_windowLen, this->_m_windowStride);
+
+        uint32_t mfccBufIdx = 0;
+
+        std::fill(_m_mfccBuf.begin(), _m_mfccBuf.end(), 0.f);
+        std::fill(_m_delta1Buf.begin(), _m_delta1Buf.end(), 0.f);
+        std::fill(_m_delta2Buf.begin(), _m_delta2Buf.end(), 0.f);
+
+        /* While we can slide over the window. */
+        while (this->_m_window.HasNext()) {
+            const int16_t*  mfccWindow = this->_m_window.Next();
+            auto mfccAudioData = std::vector<int16_t>(
+                                        mfccWindow,
+                                        mfccWindow + this->_m_windowLen);
+            auto mfcc = this->_m_mfcc.MfccCompute(mfccAudioData);
+            for (size_t i = 0; i < this->_m_mfccBuf.size(0); ++i) {
+                this->_m_mfccBuf(i, mfccBufIdx) = mfcc[i];
+            }
+            ++mfccBufIdx;
+        }
+
+        /* Pad MFCC if needed by adding MFCC for zeros. */
+        if (mfccBufIdx != this->_m_numFeatVectors) {
+            std::vector<int16_t> zerosWindow = std::vector<int16_t>(this->_m_windowLen, 0);
+            std::vector<float> mfccZeros = this->_m_mfcc.MfccCompute(zerosWindow);
+
+            while (mfccBufIdx != this->_m_numFeatVectors) {
+                memcpy(&this->_m_mfccBuf(0, mfccBufIdx),
+                       mfccZeros.data(), sizeof(float) * _m_numMfccFeats);
+                ++mfccBufIdx;
+            }
+        }
+
+        /* Compute first and second order deltas from MFCCs. */
+        this->_ComputeDeltas(this->_m_mfccBuf,
+                             this->_m_delta1Buf,
+                             this->_m_delta2Buf);
+
+        /* Normalise. */
+        this->_Normalise();
+
+        /* Quantise. */
+        QuantParams quantParams = GetTensorQuantParams(tensor);
+
+        if (0 == quantParams.scale) {
+            printf_err("Quantisation scale can't be 0\n");
+            return false;
+        }
+
+        switch(tensor->type) {
+            case kTfLiteUInt8:
+                return this->_Quantise<uint8_t>(
+                        tflite::GetTensorData<uint8_t>(tensor), tensor->bytes,
+                        quantParams.scale, quantParams.offset);
+            case kTfLiteInt8:
+                return this->_Quantise<int8_t>(
+                        tflite::GetTensorData<int8_t>(tensor), tensor->bytes,
+                        quantParams.scale, quantParams.offset);
+            default:
+                printf_err("Unsupported tensor type %s\n",
+                    TfLiteTypeGetName(tensor->type));
+        }
+
+        return false;
+    }
+
+    bool Preprocess::_ComputeDeltas(Array2d<float>& mfcc,
+                                    Array2d<float>& delta1,
+                                    Array2d<float>& delta2)
+    {
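+        /* The first-order kernel below matches the standard regression-based
+         * delta formula with a 4-frame window: coefficient n / 60 at lag n
+         * (4/60 ~= 0.0667, 3/60 = 0.05, ...), applied as a 1D convolution.
+         * The second-order kernel similarly approximates d^2/dt^2. */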
+        const std::vector <float> delta1Coeffs =
+            {6.66666667e-02,  5.00000000e-02,  3.33333333e-02,
+             1.66666667e-02, -3.46944695e-18, -1.66666667e-02,
+            -3.33333333e-02, -5.00000000e-02, -6.66666667e-02};
+
+        const std::vector <float> delta2Coeffs =
+            {0.06060606,      0.01515152,     -0.01731602,
+            -0.03679654,     -0.04329004,     -0.03679654,
+            -0.01731602,      0.01515152,      0.06060606};
+
+        if (delta1.size(0) == 0 || delta2.size(0) != delta1.size(0) ||
+            mfcc.size(0) == 0 || mfcc.size(1) == 0) {
+            return false;
+        }
+
+        /* Get the middle index; coeff vec len should always be odd. */
+        const size_t coeffLen = delta1Coeffs.size();
+        const size_t fMidIdx = (coeffLen - 1)/2;
+        const size_t numFeatures = mfcc.size(0);
+        const size_t numFeatVectors = mfcc.size(1);
+
+        /* Iterate through features in MFCC vector. */
+        for (size_t i = 0; i < numFeatures; ++i) {
+            /* For each feature, iterate through time (t) samples representing feature evolution and
+             * calculate d/dt and d^2/dt^2, using 1D convolution with differential kernels.
+             * Convolution padding = valid, result size is `time length - kernel length + 1`.
+             * The result is padded with 0 from both sides to match the size of initial time samples data.
+             *
+             * For the small filter, conv1D implementation as a simple loop is efficient enough.
+             * Filters of a greater size would need CMSIS-DSP functions to be used, like arm_fir_f32.
+             */
+
+            for (size_t j = fMidIdx; j < numFeatVectors - fMidIdx; ++j) {
+                float d1 = 0;
+                float d2 = 0;
+                const size_t mfccStIdx = j - fMidIdx;
+
+                for (size_t k = 0, m = coeffLen - 1; k < coeffLen; ++k, --m) {
+
+                    d1 +=  mfcc(i,mfccStIdx + k) * delta1Coeffs[m];
+                    d2 +=  mfcc(i,mfccStIdx + k) * delta2Coeffs[m];
+                }
+
+                delta1(i,j) = d1;
+                delta2(i,j) = d2;
+            }
+        }
+
+        return true;
+    }
+
+    float Preprocess::_GetMean(Array2d<float>& vec)
+    {
+        return math::MathUtils::MeanF32(vec.begin(), vec.totalSize());
+    }
+
+    float Preprocess::_GetStdDev(Array2d<float>& vec, const float mean)
+    {
+        return math::MathUtils::StdDevF32(vec.begin(), vec.totalSize(), mean);
+    }
+
+    void Preprocess::_NormaliseVec(Array2d<float>& vec)
+    {
+        auto mean = Preprocess::_GetMean(vec);
+        auto stddev = Preprocess::_GetStdDev(vec, mean);
+
+        debug("Mean: %f, Stddev: %f\n", mean, stddev);
+        if (stddev == 0) {
+            std::fill(vec.begin(), vec.end(), 0);
+        } else {
+            const float stddevInv = 1.f/stddev;
+            const float normalisedMean = mean/stddev;
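+            /* (x - mean) / stddev is applied as x * (1/stddev) - mean/stddev
+             * to avoid a per-element division. */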
+
+            auto NormalisingFunction = [=](float& value) {
+                value = value * stddevInv - normalisedMean;
+            };
+            std::for_each(vec.begin(), vec.end(), NormalisingFunction);
+        }
+    }
+
+    void Preprocess::_Normalise()
+    {
+        Preprocess::_NormaliseVec(this->_m_mfccBuf);
+        Preprocess::_NormaliseVec(this->_m_delta1Buf);
+        Preprocess::_NormaliseVec(this->_m_delta2Buf);
+    }
+
+    float Preprocess::_GetQuantElem(
+                const float     elem,
+                const float     quantScale,
+                const int       quantOffset,
+                const float     minVal,
+                const float     maxVal)
+    {
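+        /* Affine quantisation: q = round(x / scale + offset), clamped to the
+         * valid range of the target type. For example, with scale 0.1 and
+         * offset -128, an input of 1.0 maps to round(1.0 / 0.1 - 128) = -118. */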
+        float val = std::round((elem/quantScale) + quantOffset);
+        return std::min<float>(std::max<float>(val, minVal), maxVal);
+    }
+
+} /* namespace asr */
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/use_case/asr/usecase.cmake b/source/use_case/asr/usecase.cmake
new file mode 100644
index 0000000..e4b8752
--- /dev/null
+++ b/source/use_case/asr/usecase.cmake
@@ -0,0 +1,164 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# If the path to a directory or source file has been defined,
+# get the type here (FILEPATH or PATH):
+if (DEFINED ${use_case}_FILE_PATH)
+    get_path_type(${${use_case}_FILE_PATH} PATH_TYPE)
+
+    # Set the default type if path is not a dir or file path (or undefined)
+    if (NOT ${PATH_TYPE} STREQUAL PATH AND NOT ${PATH_TYPE} STREQUAL FILEPATH)
+        message(FATAL_ERROR "Invalid ${use_case}_FILE_PATH. It should be a dir or file path.")
+    endif()
+else()
+    # Default is a directory path
+    set(PATH_TYPE PATH)
+endif()
+
+message(STATUS "${use_case}_FILE_PATH is of type: ${PATH_TYPE}")
+
+USER_OPTION(${use_case}_FILE_PATH "Directory with custom WAV input files, or path to a single WAV file, to use in the evaluation application."
+    ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
+    ${PATH_TYPE})
+
+USER_OPTION(${use_case}_LABELS_TXT_FILE "Labels' txt file for the chosen model."
+    ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/labels/labels_wav2letter.txt
+    FILEPATH)
+
+USER_OPTION(${use_case}_AUDIO_RATE "Specify the target sampling rate. Default is 16000."
+    16000
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_MONO "Specify if the audio needs to be converted to mono. Default is ON."
+    ON
+    BOOL)
+
+USER_OPTION(${use_case}_AUDIO_OFFSET "Specify the offset (in seconds) from which to start reading the audio. Default is 0."
+    0
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_DURATION "Specify the audio duration to load (in seconds). If set to 0, the entire audio will be processed."
+    0
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_RES_TYPE "Specify the re-sampling algorithm to use. Default is 'kaiser_best'."
+    kaiser_best
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_MIN_SAMPLES "Specify the minimum number of samples to use. Default is 16000; shorter audio will be padded automatically."
+    16000
+    STRING)
+
+USER_OPTION(${use_case}_MODEL_SCORE_THRESHOLD "Specify the score threshold [0.0, 1.0) that must be applied to the inference results for a label to be deemed valid."
+    0.5
+    STRING)
+
+# Generate input files
+generate_audio_code(${${use_case}_FILE_PATH} ${SRC_GEN_DIR} ${INC_GEN_DIR}
+    ${${use_case}_AUDIO_RATE}
+    ${${use_case}_AUDIO_MONO}
+    ${${use_case}_AUDIO_OFFSET}
+    ${${use_case}_AUDIO_DURATION}
+    ${${use_case}_AUDIO_RES_TYPE}
+    ${${use_case}_AUDIO_MIN_SAMPLES})
+
+# Generate labels file
+set(${use_case}_LABELS_CPP_FILE Labels)
+generate_labels_code(
+    INPUT           "${${use_case}_LABELS_TXT_FILE}" 
+    DESTINATION_SRC ${SRC_GEN_DIR}
+    DESTINATION_HDR ${INC_GEN_DIR}
+    OUTPUT_FILENAME "${${use_case}_LABELS_CPP_FILE}"
+)
+
+
+USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen model"
+    0x00200000
+    STRING)
+
+
+# If there is no tflite file pointed to
+if (NOT DEFINED ${use_case}_MODEL_TFLITE_PATH)
+
+    set(MODEL_FILENAME          wav2letter_int8.tflite)
+    set(MODEL_RESOURCES_DIR     ${DOWNLOAD_DEP_DIR}/${use_case})
+    file(MAKE_DIRECTORY         ${MODEL_RESOURCES_DIR})
+    set(DEFAULT_MODEL_PATH      ${MODEL_RESOURCES_DIR}/${MODEL_FILENAME})
+
+    # Download the default model
+    set(ZOO_COMMON_SUBPATH      "models/speech_recognition/wav2letter/tflite_int8")
+    set(ZOO_MODEL_SUBPATH       "${ZOO_COMMON_SUBPATH}/${MODEL_FILENAME}")
+
+    download_file_from_modelzoo(${ZOO_MODEL_SUBPATH}    ${DEFAULT_MODEL_PATH})
+
+    if (ETHOS_U55_ENABLED)
+        message(STATUS
+            "Ethos-U55 is enabled, but the model downloaded is not optimized by vela. "
+            "To use Ethos-U55 acceleration, optimise the downloaded model and pass it "
+            "as ${use_case}_MODEL_TFLITE_PATH to the CMake configuration.")
+    endif()
+
+    # If the target platform is native
+    if (${TARGET_PLATFORM} STREQUAL native)
+
+        # Download test vectors
+        set(ZOO_TEST_IFM_SUBPATH    "${ZOO_COMMON_SUBPATH}/testing_input/input_2_int8/0.npy")
+        set(ZOO_TEST_OFM_SUBPATH    "${ZOO_COMMON_SUBPATH}/testing_output/Identity_int8/0.npy")
+
+        set(${use_case}_TEST_IFM    ${MODEL_RESOURCES_DIR}/ifm0.npy CACHE FILEPATH
+                                    "Input test vector for ${use_case}")
+        set(${use_case}_TEST_OFM    ${MODEL_RESOURCES_DIR}/ofm0.npy CACHE FILEPATH
+                                    "Output test vector for ${use_case}")
+
+        download_file_from_modelzoo(${ZOO_TEST_IFM_SUBPATH} ${${use_case}_TEST_IFM})
+        download_file_from_modelzoo(${ZOO_TEST_OFM_SUBPATH} ${${use_case}_TEST_OFM})
+
+        set(TEST_SRC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/src)
+        set(TEST_INC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/include)
+        file(MAKE_DIRECTORY ${TEST_SRC_GEN_DIR} ${TEST_INC_GEN_DIR})
+
+        # Generate test data files to be included in x86 tests
+        generate_test_data_code(
+                            INPUT_DIR "${DOWNLOAD_DEP_DIR}/${use_case}"
+                            DESTINATION_SRC ${TEST_SRC_GEN_DIR}
+                            DESTINATION_HDR ${TEST_INC_GEN_DIR}
+                            USECASE  "${use_case}")
+    endif()
+
+else()
+    set(DEFAULT_MODEL_PATH  "N/A")
+endif()
+
+set(EXTRA_MODEL_CODE
+    "/* Model parameters for ${use_case} */"
+    "extern const int   g_FrameLength    = 512"
+    "extern const int   g_FrameStride    = 160"
+    "extern const int   g_ctxLen         =  98"
+    "extern const float g_ScoreThreshold = ${${use_case}_MODEL_SCORE_THRESHOLD}"
+    )
+
+USER_OPTION(${use_case}_MODEL_TFLITE_PATH "NN models file to be used in the evaluation application. Model files must be in tflite format."
+    ${DEFAULT_MODEL_PATH}
+    FILEPATH
+    )
+
+# Generate model file
+generate_tflite_code(
+    MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+    DESTINATION ${SRC_GEN_DIR}
+    EXPRESSIONS ${EXTRA_MODEL_CODE}
+    )
diff --git a/source/use_case/img_class/include/MobileNetModel.hpp b/source/use_case/img_class/include/MobileNetModel.hpp
new file mode 100644
index 0000000..f0521ce
--- /dev/null
+++ b/source/use_case/img_class/include/MobileNetModel.hpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef IMG_CLASS_MOBILENETMODEL_HPP
+#define IMG_CLASS_MOBILENETMODEL_HPP
+
+#include "Model.hpp"
+
+namespace arm {
+namespace app {
+
+    class MobileNetModel : public Model {
+
+    public:
+        /* Indices for the expected model - based on input tensor shape */
+        static constexpr uint32_t ms_inputRowsIdx     = 1;
+        static constexpr uint32_t ms_inputColsIdx     = 2;
+        static constexpr uint32_t ms_inputChannelsIdx = 3;
+
+    protected:
+        /** @brief   Gets the reference to op resolver interface class. */
+        const tflite::MicroOpResolver& GetOpResolver() override;
+
+        /** @brief   Adds operations to the op resolver instance. */
+        bool EnlistOperations() override;
+
+        const uint8_t* ModelPointer() override;
+
+        size_t ModelSize() override;
+
+    private:
+        /* Maximum number of individual operations that can be enlisted. */
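+        /* Six builtin operators are registered in EnlistOperations, plus the
+         * optional Ethos-U custom operator when ARM_NPU is defined. */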
+        static constexpr int _ms_maxOpCnt = 7;
+
+        /* A mutable op resolver instance. */
+        tflite::MicroMutableOpResolver<_ms_maxOpCnt> _m_opResolver;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* IMG_CLASS_MOBILENETMODEL_HPP */
\ No newline at end of file
diff --git a/source/use_case/img_class/include/UseCaseHandler.hpp b/source/use_case/img_class/include/UseCaseHandler.hpp
new file mode 100644
index 0000000..a6cf104
--- /dev/null
+++ b/source/use_case/img_class/include/UseCaseHandler.hpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef IMG_CLASS_EVT_HANDLER_HPP
+#define IMG_CLASS_EVT_HANDLER_HPP
+
+#include "AppContext.hpp"
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief       Handles the inference event.
+     * @param[in]   ctx        Reference to the application context.
+     * @param[in]   imgIndex   Index to the image to classify.
+     * @param[in]   runAll     Flag to request classification of all the available images.
+     * @return      true or false based on execution success.
+     **/
+    bool ClassifyImageHandler(ApplicationContext& ctx, uint32_t imgIndex, bool runAll);
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* IMG_CLASS_EVT_HANDLER_HPP */
\ No newline at end of file
diff --git a/source/use_case/img_class/src/MainLoop.cc b/source/use_case/img_class/src/MainLoop.cc
new file mode 100644
index 0000000..469907c
--- /dev/null
+++ b/source/use_case/img_class/src/MainLoop.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"                    /* Brings in platform definitions. */
+#include "Classifier.hpp"           /* Classifier. */
+#include "InputFiles.hpp"           /* For input images. */
+#include "Labels.hpp"               /* For label strings. */
+#include "MobileNetModel.hpp"       /* Model class for running inference. */
+#include "UseCaseHandler.hpp"       /* Handlers for different user options. */
+#include "UseCaseCommonUtils.hpp"   /* Utils functions. */
+
+using ImgClassClassifier = arm::app::Classifier;
+
+enum opcodes
+{
+    MENU_OPT_RUN_INF_NEXT = 1,       /* Run on next vector. */
+    MENU_OPT_RUN_INF_CHOSEN,         /* Run on a user provided vector index. */
+    MENU_OPT_RUN_INF_ALL,            /* Run inference on all. */
+    MENU_OPT_SHOW_MODEL_INFO,        /* Show model info. */
+    MENU_OPT_LIST_IMAGES             /* List the current baked images. */
+};
+
+static void DisplayMenu()
+{
+    printf("\n\nUser input required\n");
+    printf("Enter option number from:\n\n");
+    printf("  %u. Classify next image\n", MENU_OPT_RUN_INF_NEXT);
+    printf("  %u. Classify image at chosen index\n", MENU_OPT_RUN_INF_CHOSEN);
+    printf("  %u. Run classification on all images\n", MENU_OPT_RUN_INF_ALL);
+    printf("  %u. Show NN model info\n", MENU_OPT_SHOW_MODEL_INFO);
+    printf("  %u. List images\n\n", MENU_OPT_LIST_IMAGES);
+    printf("  Choice: ");
+}
+
+void main_loop(hal_platform& platform)
+{
+    arm::app::MobileNetModel model;  /* Model wrapper object. */
+
+    /* Load the model. */
+    if (!model.Init()) {
+        printf_err("Failed to initialise model\n");
+        return;
+    }
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("model", model);
+    caseContext.Set<uint32_t>("imgIndex", 0);
+
+    ImgClassClassifier classifier;  /* Classifier wrapper object. */
+    caseContext.Set<arm::app::Classifier&>("classifier", classifier);
+
+    std::vector <std::string> labels;
+    GetLabelsVector(labels);
+    caseContext.Set<const std::vector <std::string>&>("labels", labels);
+
+    bool executionSuccessful = true;
+    constexpr bool bUseMenu = NUMBER_OF_FILES > 1;
+
+    /* Loop. */
+    do {
+        int menuOption = MENU_OPT_RUN_INF_NEXT;
+        if (bUseMenu) {
+            DisplayMenu();
+            menuOption = arm::app::ReadUserInputAsInt(platform);
+            printf("\n");
+        }
+        switch (menuOption) {
+            case MENU_OPT_RUN_INF_NEXT:
+                executionSuccessful = ClassifyImageHandler(caseContext, caseContext.Get<uint32_t>("imgIndex"), false);
+                break;
+            case MENU_OPT_RUN_INF_CHOSEN: {
+                printf("    Enter the image index [0, %d]: ", NUMBER_OF_FILES-1);
+                auto imgIndex = static_cast<uint32_t>(arm::app::ReadUserInputAsInt(platform));
+                executionSuccessful = ClassifyImageHandler(caseContext, imgIndex, false);
+                break;
+            }
+            case MENU_OPT_RUN_INF_ALL:
+                executionSuccessful = ClassifyImageHandler(caseContext, caseContext.Get<uint32_t>("imgIndex"), true);
+                break;
+            case MENU_OPT_SHOW_MODEL_INFO:
+                executionSuccessful = model.ShowModelInfoHandler();
+                break;
+            case MENU_OPT_LIST_IMAGES:
+                executionSuccessful = ListFilesHandler(caseContext);
+                break;
+            default:
+                printf("Incorrect choice, try again.");
+                break;
+        }
+    } while (executionSuccessful && bUseMenu);
+    info("Main loop terminated.\n");
+}
\ No newline at end of file
diff --git a/source/use_case/img_class/src/MobileNetModel.cc b/source/use_case/img_class/src/MobileNetModel.cc
new file mode 100644
index 0000000..eeaa109
--- /dev/null
+++ b/source/use_case/img_class/src/MobileNetModel.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "MobileNetModel.hpp"
+
+#include "hal.h"
+
+const tflite::MicroOpResolver& arm::app::MobileNetModel::GetOpResolver()
+{
+    return this->_m_opResolver;
+}
+
+bool arm::app::MobileNetModel::EnlistOperations()
+{
+    this->_m_opResolver.AddDepthwiseConv2D();
+    this->_m_opResolver.AddConv2D();
+    this->_m_opResolver.AddAveragePool2D();
+    this->_m_opResolver.AddAdd();
+    this->_m_opResolver.AddReshape();
+    this->_m_opResolver.AddSoftmax();
+
+#if defined(ARM_NPU)
+    if (kTfLiteOk == this->_m_opResolver.AddEthosU()) {
+        info("Added %s support to op resolver\n",
+            tflite::GetString_ETHOSU());
+    } else {
+        printf_err("Failed to add Arm NPU support to op resolver.");
+        return false;
+    }
+#endif /* ARM_NPU */
+    return true;
+}
+
+extern uint8_t* GetModelPointer();
+const uint8_t* arm::app::MobileNetModel::ModelPointer()
+{
+    return GetModelPointer();
+}
+
+extern size_t GetModelLen();
+size_t arm::app::MobileNetModel::ModelSize()
+{
+    return GetModelLen();
+}
\ No newline at end of file
diff --git a/source/use_case/img_class/src/UseCaseHandler.cc b/source/use_case/img_class/src/UseCaseHandler.cc
new file mode 100644
index 0000000..a412fec
--- /dev/null
+++ b/source/use_case/img_class/src/UseCaseHandler.cc
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "UseCaseHandler.hpp"
+
+#include "Classifier.hpp"
+#include "InputFiles.hpp"
+#include "MobileNetModel.hpp"
+#include "UseCaseCommonUtils.hpp"
+#include "hal.h"
+
+using ImgClassClassifier = arm::app::Classifier;
+
+namespace arm {
+namespace app {
+
+    /**
+    * @brief           Helper function to load the current image into the input
+    *                  tensor.
+    * @param[in]       imIdx         Image index (from the pool of images available
+    *                                to the application).
+    * @param[out]      inputTensor   Pointer to the input tensor to be populated.
+    * @return          true if tensor is loaded, false otherwise.
+    **/
+    static bool _LoadImageIntoTensor(uint32_t imIdx, TfLiteTensor* inputTensor);
+
+    /**
+     * @brief           Helper function to increment current image index.
+     * @param[in,out]   ctx   Reference to the application context object.
+     **/
+    static void _IncrementAppCtxImageIdx(ApplicationContext& ctx);
+
+    /**
+     * @brief           Helper function to set the image index.
+     * @param[in,out]   ctx   Reference to the application context object.
+     * @param[in]       idx   Value to be set.
+     * @return          true if index is set, false otherwise.
+     **/
+    static bool _SetAppCtxImageIdx(ApplicationContext& ctx, uint32_t idx);
+
+    /**
+     * @brief           Presents inference results using the data presentation
+     *                  object.
+     * @param[in]       platform    Reference to the hal platform object.
+     * @param[in]       results     Vector of classification results to be displayed.
+     * @return          true if successful, false otherwise.
+     **/
+    static bool _PresentInferenceResult(hal_platform& platform,
+                                        const std::vector<ClassificationResult>& results);
+
+    /**
+     * @brief           Helper function to convert a UINT8 image to INT8 format.
+     * @param[in,out]   data            Pointer to the data start.
+     * @param[in]       kMaxImageSize   Total size of the image data in bytes.
+     **/
+    static void ConvertImgToInt8(void* data, size_t kMaxImageSize);
+
+    /* Image inference classification handler. */
+    bool ClassifyImageHandler(ApplicationContext& ctx, uint32_t imgIndex, bool runAll)
+    {
+        auto& platform = ctx.Get<hal_platform&>("platform");
+
+        constexpr uint32_t dataPsnImgDownscaleFactor = 2;
+        constexpr uint32_t dataPsnImgStartX = 10;
+        constexpr uint32_t dataPsnImgStartY = 35;
+
+        constexpr uint32_t dataPsnTxtInfStartX = 150;
+        constexpr uint32_t dataPsnTxtInfStartY = 40;
+
+        platform.data_psn->clear(COLOR_BLACK);
+
+        auto& model = ctx.Get<Model&>("model");
+
+        /* If the request has a valid size, set the image index. */
+        if (imgIndex < NUMBER_OF_FILES) {
+            if (!_SetAppCtxImageIdx(ctx, imgIndex)) {
+                return false;
+            }
+        }
+        if (!model.IsInited()) {
+            printf_err("Model is not initialised! Terminating processing.\n");
+            return false;
+        }
+
+        auto curImIdx = ctx.Get<uint32_t>("imgIndex");
+
+        TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+        TfLiteTensor* inputTensor = model.GetInputTensor(0);
+
+        if (!inputTensor->dims) {
+            printf_err("Invalid input tensor dims\n");
+            return false;
+        } else if (inputTensor->dims->size < 3) {
+            printf_err("Input tensor dimension should be >= 3\n");
+            return false;
+        }
+
+        TfLiteIntArray* inputShape = model.GetInputShape(0);
+
+        const uint32_t nCols = inputShape->data[arm::app::MobileNetModel::ms_inputColsIdx];
+        const uint32_t nRows = inputShape->data[arm::app::MobileNetModel::ms_inputRowsIdx];
+        const uint32_t nChannels = inputShape->data[arm::app::MobileNetModel::ms_inputChannelsIdx];
+
+        std::vector<ClassificationResult> results;
+
+        do {
+            /* Strings for presentation/logging. */
+            std::string str_inf{"Running inference... "};
+
+            /* Copy over the data. */
+            _LoadImageIntoTensor(ctx.Get<uint32_t>("imgIndex"), inputTensor);
+
+            /* Display this image on the LCD. */
+            platform.data_psn->present_data_image(
+                (uint8_t*) inputTensor->data.data,
+                nCols, nRows, nChannels,
+                dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);
+
+            /* If the data is signed. */
+            if (model.IsDataSigned()) {
+                ConvertImgToInt8(inputTensor->data.data, inputTensor->bytes);
+            }
+
+            /* Display message on the LCD - inference running. */
+            platform.data_psn->present_data_text(str_inf.c_str(), str_inf.size(),
+                                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+            /* Run inference over this image. */
+            info("Running inference on image %u => %s\n", ctx.Get<uint32_t>("imgIndex"),
+                get_filename(ctx.Get<uint32_t>("imgIndex")));
+
+            RunInference(platform, model);
+
+            /* Erase. */
+            str_inf = std::string(str_inf.size(), ' ');
+            platform.data_psn->present_data_text(str_inf.c_str(), str_inf.size(),
+                                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+            auto& classifier = ctx.Get<ImgClassClassifier&>("classifier");
+            classifier.GetClassificationResults(outputTensor, results,
+                                                ctx.Get<std::vector <std::string>&>("labels"),
+                                                5);
+
+            /* Add results to context for access outside handler. */
+            ctx.Set<std::vector<ClassificationResult>>("results", results);
+
+#if VERIFY_TEST_OUTPUT
+            arm::app::DumpTensor(outputTensor);
+#endif /* VERIFY_TEST_OUTPUT */
+
+            if (!_PresentInferenceResult(platform, results)) {
+                return false;
+            }
+
+            _IncrementAppCtxImageIdx(ctx);
+
+        } while (runAll && ctx.Get<uint32_t>("imgIndex") != curImIdx);
+
+        return true;
+    }
+
+    static bool _LoadImageIntoTensor(const uint32_t imIdx, TfLiteTensor* inputTensor)
+    {
+        const size_t copySz = inputTensor->bytes < IMAGE_DATA_SIZE ?
+                              inputTensor->bytes : IMAGE_DATA_SIZE;
+        const uint8_t* imgSrc = get_img_array(imIdx);
+        if (nullptr == imgSrc) {
+            printf_err("Failed to get image index %u (max: %u)\n", imIdx,
+                       NUMBER_OF_FILES - 1);
+            return false;
+        }
+
+        memcpy(inputTensor->data.data, imgSrc, copySz);
+        debug("Image %u loaded\n", imIdx);
+        return true;
+    }
+
+    static void _IncrementAppCtxImageIdx(ApplicationContext& ctx)
+    {
+        auto curImIdx = ctx.Get<uint32_t>("imgIndex");
+
+        if (curImIdx + 1 >= NUMBER_OF_FILES) {
+            ctx.Set<uint32_t>("imgIndex", 0);
+            return;
+        }
+        ++curImIdx;
+        ctx.Set<uint32_t>("imgIndex", curImIdx);
+    }
+
+    static bool _SetAppCtxImageIdx(ApplicationContext& ctx, const uint32_t idx)
+    {
+        if (idx >= NUMBER_OF_FILES) {
+            printf_err("Invalid idx %u (expected less than %u)\n",
+                       idx, NUMBER_OF_FILES);
+            return false;
+        }
+        ctx.Set<uint32_t>("imgIndex", idx);
+        return true;
+    }
+
+    static bool _PresentInferenceResult(hal_platform& platform,
+                                        const std::vector<ClassificationResult>& results)
+    {
+        constexpr uint32_t dataPsnTxtStartX1 = 150;
+        constexpr uint32_t dataPsnTxtStartY1 = 30;
+
+        constexpr uint32_t dataPsnTxtStartX2 = 10;
+        constexpr uint32_t dataPsnTxtStartY2 = 150;
+
+        constexpr uint32_t dataPsnTxtYIncr = 16;  /* Row index increment. */
+
+        platform.data_psn->set_text_color(COLOR_GREEN);
+
+        /* Display each result. */
+        uint32_t rowIdx1 = dataPsnTxtStartY1 + 2 * dataPsnTxtYIncr;
+        uint32_t rowIdx2 = dataPsnTxtStartY2;
+
+        for (uint32_t i = 0; i < results.size(); ++i) {
+            std::string resultStr =
+                std::to_string(i + 1) + ") " +
+                std::to_string(results[i].m_labelIdx) +
+                " (" + std::to_string(results[i].m_normalisedVal) + ")";
+
+            platform.data_psn->present_data_text(
+                                        resultStr.c_str(), resultStr.size(),
+                                        dataPsnTxtStartX1, rowIdx1, 0);
+            rowIdx1 += dataPsnTxtYIncr;
+
+            resultStr = std::to_string(i + 1) + ") " + results[i].m_label;
+            platform.data_psn->present_data_text(
+                                        resultStr.c_str(), resultStr.size(),
+                                        dataPsnTxtStartX2, rowIdx2, 0);
+            rowIdx2 += dataPsnTxtYIncr;
+
+            info("%u) %u (%f) -> %s\n", i, results[i].m_labelIdx,
+                 results[i].m_normalisedVal, results[i].m_label.c_str());
+        }
+
+        return true;
+    }
+
+    static void ConvertImgToInt8(void* data, const size_t kMaxImageSize)
+    {
+        auto* tmp_req_data = (uint8_t*) data;
+        auto* tmp_signed_req_data = (int8_t*) data;
+
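+        /* In-place offset conversion: each uint8 value v becomes v - 128, e.g.
+         * 0 -> -128, 128 -> 0, 255 -> 127, giving the int8 range expected by
+         * signed models. */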
+        for (size_t i = 0; i < kMaxImageSize; i++) {
+            tmp_signed_req_data[i] = (int8_t) (
+                (int32_t) (tmp_req_data[i]) - 128);
+        }
+    }
+
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/img_class/usecase.cmake b/source/use_case/img_class/usecase.cmake
new file mode 100644
index 0000000..440eabe
--- /dev/null
+++ b/source/use_case/img_class/usecase.cmake
@@ -0,0 +1,125 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# If the path to a directory or source file has been defined,
+# get the type here (FILEPATH or PATH):
+if (DEFINED ${use_case}_FILE_PATH)
+    get_path_type(${${use_case}_FILE_PATH} PATH_TYPE)
+    # Set the default type if path is not a dir or file path (or undefined)
+    if (NOT ${PATH_TYPE} STREQUAL PATH AND NOT ${PATH_TYPE} STREQUAL FILEPATH)
+        message(FATAL_ERROR "Invalid ${use_case}_FILE_PATH. It should be a dir or file path.")
+    endif()
+else()
+    # Default is a directory path
+    set(PATH_TYPE PATH)
+endif()
+
+message(STATUS "${use_case}_FILE_PATH is of type: ${PATH_TYPE}")
+
+USER_OPTION(${use_case}_FILE_PATH "Directory with custom image files to use, or path to a single image, in the evaluation application"
+    ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
+    ${PATH_TYPE})
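+
+# Illustrative example only (the path below is hypothetical): the option above can be
+# overridden at configure time with either a directory of images or a single image file, e.g.
+#   cmake .. -Dimg_class_FILE_PATH=/path/to/custom/images/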
+
+USER_OPTION(${use_case}_IMAGE_SIZE "Square image size in pixels. Images will be resized to this size."
+    224
+    STRING)
+
+USER_OPTION(${use_case}_LABELS_TXT_FILE "Labels' txt file for the chosen model"
+    ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/labels/labels_mobilenet_v2_1.0_224.txt
+    FILEPATH)
+
+# Generate input files
+generate_images_code("${${use_case}_FILE_PATH}"
+                     ${SRC_GEN_DIR}
+                     ${INC_GEN_DIR}
+                     "${${use_case}_IMAGE_SIZE}")
+
+# Generate labels file
+set(${use_case}_LABELS_CPP_FILE Labels)
+generate_labels_code(
+    INPUT           "${${use_case}_LABELS_TXT_FILE}"
+    DESTINATION_SRC ${SRC_GEN_DIR}
+    DESTINATION_HDR ${INC_GEN_DIR}
+    OUTPUT_FILENAME "${${use_case}_LABELS_CPP_FILE}"
+)
+
+USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen model"
+    0x00200000
+    STRING)
+
+# If there is no tflite file pointed to
+if (NOT DEFINED ${use_case}_MODEL_TFLITE_PATH)
+
+    set(MODEL_RESOURCES_DIR     ${DOWNLOAD_DEP_DIR}/${use_case})
+    file(MAKE_DIRECTORY         ${MODEL_RESOURCES_DIR})
+    set(MODEL_FILENAME          mobilenet_v2_1.0_224_quantized_1_default_1.tflite)
+    set(DEFAULT_MODEL_PATH      ${MODEL_RESOURCES_DIR}/${MODEL_FILENAME})
+
+    # Download the default model
+    set(ZOO_COMMON_SUBPATH      "models/image_classification/mobilenet_v2_1.0_224/tflite_uint8")
+    set(ZOO_MODEL_SUBPATH       "${ZOO_COMMON_SUBPATH}/${MODEL_FILENAME}")
+
+    download_file_from_modelzoo(${ZOO_MODEL_SUBPATH}    ${DEFAULT_MODEL_PATH})
+
+    if (ETHOS_U55_ENABLED)
+        message(STATUS
+            "Ethos-U55 is enabled, but the model downloaded is not optimized by vela. "
+            "To use Ethos-U55 acceleration, optimise the downloaded model and pass it "
+            "as ${use_case}_MODEL_TFLITE_PATH to the CMake configuration.")
+    endif()
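+
+    # Sketch only - exact vela options should be checked against the vela documentation.
+    # A typical flow is to optimise the model offline, e.g.
+    #   vela mobilenet_v2_1.0_224_quantized_1_default_1.tflite --accelerator-config=ethos-u55-128
+    # and then pass the generated *_vela.tflite file via ${use_case}_MODEL_TFLITE_PATH.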
+
+    # If the target platform is native
+    if (${TARGET_PLATFORM} STREQUAL native)
+
+        # Download test vectors
+        set(ZOO_TEST_IFM_SUBPATH    "${ZOO_COMMON_SUBPATH}/testing_input/input/0.npy")
+        set(ZOO_TEST_OFM_SUBPATH    "${ZOO_COMMON_SUBPATH}/testing_output/output/0.npy")
+
+        set(${use_case}_TEST_IFM    ${MODEL_RESOURCES_DIR}/ifm0.npy CACHE FILEPATH
+                                    "Input test vector for ${use_case}")
+        set(${use_case}_TEST_OFM    ${MODEL_RESOURCES_DIR}/ofm0.npy CACHE FILEPATH
+                                    "Input test vector for ${use_case}")
+
+        download_file_from_modelzoo(${ZOO_TEST_IFM_SUBPATH} ${${use_case}_TEST_IFM})
+        download_file_from_modelzoo(${ZOO_TEST_OFM_SUBPATH} ${${use_case}_TEST_OFM})
+
+        set(TEST_SRC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/src)
+        set(TEST_INC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/include)
+        file(MAKE_DIRECTORY ${TEST_SRC_GEN_DIR} ${TEST_INC_GEN_DIR})
+
+        # Generate test data files to be included in x86 tests
+        generate_test_data_code(
+                            INPUT_DIR "${DOWNLOAD_DEP_DIR}/${use_case}"
+                            DESTINATION_SRC ${TEST_SRC_GEN_DIR}
+                            DESTINATION_HDR ${TEST_INC_GEN_DIR}
+                            USECASE  "${use_case}")
+    endif()
+
+else()
+    set(DEFAULT_MODEL_PATH  "N/A")
+endif()
+
+USER_OPTION(${use_case}_MODEL_TFLITE_PATH "NN models file to be used in the evaluation application. Model files must be in tflite format."
+    ${DEFAULT_MODEL_PATH}
+    FILEPATH
+    )
+
+# Generate model file
+generate_tflite_code(
+    MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+    DESTINATION ${SRC_GEN_DIR}
+    )
diff --git a/source/use_case/inference_runner/include/TestModel.hpp b/source/use_case/inference_runner/include/TestModel.hpp
new file mode 100644
index 0000000..0b3e9b9
--- /dev/null
+++ b/source/use_case/inference_runner/include/TestModel.hpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef INF_RUNNER_TESTMODEL_HPP
+#define INF_RUNNER_TESTMODEL_HPP
+
+#include "Model.hpp"
+
+namespace arm {
+namespace app {
+
+    class TestModel : public Model {
+
+    protected:
+        /** @brief   Gets the reference to op resolver interface class. */
+        const tflite::AllOpsResolver& GetOpResolver() override;
+
+        /** @brief   Adds operations to the op resolver instance; not needed when using AllOpsResolver. */
+        bool EnlistOperations() override {return false;}
+
+        const uint8_t* ModelPointer() override;
+
+        size_t ModelSize() override;
+
+    private:
+
+        /* No need to register individual ops; AllOpsResolver covers them all at the cost of extra memory. */
+        tflite::AllOpsResolver _m_opResolver;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* INF_RUNNER_TESTMODEL_HPP */
\ No newline at end of file
diff --git a/source/use_case/inference_runner/include/UseCaseHandler.hpp b/source/use_case/inference_runner/include/UseCaseHandler.hpp
new file mode 100644
index 0000000..4962650
--- /dev/null
+++ b/source/use_case/inference_runner/include/UseCaseHandler.hpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef INF_RUNNER_EVT_HANDLER_HPP
+#define INF_RUNNER_EVT_HANDLER_HPP
+
+#include "AppContext.hpp"
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief       Handles the inference event.
+     * @param[in]   ctx   Reference to the application context.
+     * @return      true or false based on execution success.
+     **/
+    bool RunInferenceHandler(ApplicationContext& ctx);
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* INF_RUNNER_EVT_HANDLER_HPP */
\ No newline at end of file
diff --git a/source/use_case/inference_runner/src/MainLoop.cc b/source/use_case/inference_runner/src/MainLoop.cc
new file mode 100644
index 0000000..b110a24
--- /dev/null
+++ b/source/use_case/inference_runner/src/MainLoop.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"                    /* Brings in platform definitions. */
+#include "TestModel.hpp"            /* Model class for running inference. */
+#include "UseCaseHandler.hpp"       /* Handlers for different user options. */
+#include "UseCaseCommonUtils.hpp"   /* Utils functions. */
+
+enum opcodes
+{
+    MENU_OPT_RUN_INF_NEXT = 1,       /* Run on next vector. */
+    MENU_OPT_SHOW_MODEL_INFO,        /* Show model info. */
+};
+
+void main_loop(hal_platform& platform)
+{
+    arm::app::TestModel model;  /* Model wrapper object. */
+
+    /* Load the model. */
+    if (!model.Init()) {
+        printf_err("Failed to initialise model\n");
+        return;
+    }
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("model", model);
+    caseContext.Set<uint32_t>("imgIndex", 0);
+
+    /* Run a single inference. */
+    if (RunInferenceHandler(caseContext)) {
+        info("Inference completed.\n");
+    } else {
+        printf_err("Inference failed.\n");
+    }
+}
diff --git a/source/use_case/inference_runner/src/TestModel.cc b/source/use_case/inference_runner/src/TestModel.cc
new file mode 100644
index 0000000..0926a96
--- /dev/null
+++ b/source/use_case/inference_runner/src/TestModel.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "TestModel.hpp"
+
+#include "hal.h"
+
+const tflite::AllOpsResolver& arm::app::TestModel::GetOpResolver()
+{
+    return this->_m_opResolver;
+}
+
+extern uint8_t* GetModelPointer();
+const uint8_t* arm::app::TestModel::ModelPointer()
+{
+    return GetModelPointer();
+}
+
+extern size_t GetModelLen();
+size_t arm::app::TestModel::ModelSize()
+{
+    return GetModelLen();
+}
\ No newline at end of file
diff --git a/source/use_case/inference_runner/src/UseCaseHandler.cc b/source/use_case/inference_runner/src/UseCaseHandler.cc
new file mode 100644
index 0000000..ac4ea47
--- /dev/null
+++ b/source/use_case/inference_runner/src/UseCaseHandler.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "UseCaseHandler.hpp"
+
+#include "TestModel.hpp"
+#include "UseCaseCommonUtils.hpp"
+#include "hal.h"
+
+#include <cstdlib>
+
+namespace arm {
+namespace app {
+
+    bool RunInferenceHandler(ApplicationContext& ctx)
+    {
+        auto& platform = ctx.Get<hal_platform&>("platform");
+        auto& model = ctx.Get<Model&>("model");
+
+        constexpr uint32_t dataPsnTxtInfStartX = 150;
+        constexpr uint32_t dataPsnTxtInfStartY = 40;
+
+        if (!model.IsInited()) {
+            printf_err("Model is not initialised! Terminating processing.\n");
+            return false;
+        }
+
+        const size_t numInputs = model.GetNumInputs();
+
+        /* Populate each input tensor with random data. */
+        for (size_t inputIndex = 0; inputIndex < numInputs; inputIndex++) {
+
+            TfLiteTensor* inputTensor = model.GetInputTensor(inputIndex);
+
+            debug("Populating input tensor %zu@%p\n", inputIndex, inputTensor);
+            debug("Total input size to be populated: %zu\n", inputTensor->bytes);
+
+            /* Create a random input. */
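+            /* Note: std::rand() is not seeded, so the generated pattern is the same
+             * deterministic pseudo-random sequence on every run. */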
+            if (inputTensor->bytes > 0) {
+
+                uint8_t* tData = tflite::GetTensorData<uint8_t>(inputTensor);
+
+                for (size_t j = 0; j < inputTensor->bytes; ++j) {
+                    tData[j] = static_cast<uint8_t>(std::rand() & 0xFF);
+                }
+            }
+        }
+
+        /* Strings for presentation/logging. */
+        std::string str_inf{"Running inference... "};
+
+        /* Display message on the LCD - inference running. */
+        platform.data_psn->present_data_text(
+                                str_inf.c_str(), str_inf.size(),
+                                dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+        RunInference(platform, model);
+
+        /* Erase. */
+        str_inf = std::string(str_inf.size(), ' ');
+        platform.data_psn->present_data_text(
+                                str_inf.c_str(), str_inf.size(),
+                                dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+#if VERIFY_TEST_OUTPUT
+        for (size_t outputIndex = 0; outputIndex < model.GetNumOutputs(); outputIndex++) {
+            arm::app::DumpTensor(model.GetOutputTensor(outputIndex));
+        }
+#endif /* VERIFY_TEST_OUTPUT */
+
+        return true;
+    }
+
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/inference_runner/usecase.cmake b/source/use_case/inference_runner/usecase.cmake
new file mode 100644
index 0000000..77b1ae1
--- /dev/null
+++ b/source/use_case/inference_runner/usecase.cmake
@@ -0,0 +1,57 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen model"
+    0x00200000
+    STRING)
+
+generate_default_input_code(${INC_GEN_DIR})
+
+# If there is no tflite file pointed to
+if (NOT DEFINED ${use_case}_MODEL_TFLITE_PATH)
+
+    set(MODEL_RESOURCES_DIR     ${DOWNLOAD_DEP_DIR}/${use_case})
+    file(MAKE_DIRECTORY         ${MODEL_RESOURCES_DIR})
+    set(MODEL_FILENAME          dnn_s_quantized.tflite)
+    set(DEFAULT_MODEL_PATH      ${MODEL_RESOURCES_DIR}/${MODEL_FILENAME})
+
+    # Download the default model
+    set(ZOO_COMMON_SUBPATH      "models/keyword_spotting/dnn_small/tflite_int8/")
+    set(ZOO_MODEL_SUBPATH       "${ZOO_COMMON_SUBPATH}/${MODEL_FILENAME}")
+
+    download_file_from_modelzoo(${ZOO_MODEL_SUBPATH}    ${DEFAULT_MODEL_PATH})
+
+    if (ETHOS_U55_ENABLED)
+        message(STATUS
+            "Ethos-U55 is enabled, but the model downloaded is not optimized by vela. "
+            "To use Ethos-U55 acceleration, optimise the downloaded model and pass it "
+            "as ${use_case}_MODEL_TFLITE_PATH to the CMake configuration.")
+    endif()
+
+else()
+    set(DEFAULT_MODEL_PATH  "N/A")
+endif()
+
+USER_OPTION(${use_case}_MODEL_TFLITE_PATH "NN models file to be used in the evaluation application. Model files must be in tflite format."
+    ${DEFAULT_MODEL_PATH}
+    FILEPATH)
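+
+# Illustrative example only (the path below is hypothetical): a custom model can be supplied
+# at configure time, e.g.
+#   cmake .. -Dinference_runner_MODEL_TFLITE_PATH=/path/to/custom_model_vela.tflite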
+
+# Generate model file
+generate_tflite_code(
+    MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+    DESTINATION ${SRC_GEN_DIR}
+)
diff --git a/source/use_case/kws/include/DsCnnMfcc.hpp b/source/use_case/kws/include/DsCnnMfcc.hpp
new file mode 100644
index 0000000..3f681af
--- /dev/null
+++ b/source/use_case/kws/include/DsCnnMfcc.hpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_DSCNN_MFCC_HPP
+#define KWS_DSCNN_MFCC_HPP
+
+#include "Mfcc.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    /* Class to provide DS-CNN specific MFCC calculation requirements. */
+    class DsCnnMFCC : public MFCC {
+
+    public:
+        static constexpr uint32_t  ms_defaultSamplingFreq = 16000;
+        static constexpr uint32_t  ms_defaultNumFbankBins =    40;
+        static constexpr uint32_t  ms_defaultMelLoFreq    =    20;
+        static constexpr uint32_t  ms_defaultMelHiFreq    =  4000;
+        static constexpr bool      ms_defaultUseHtkMethod =  true;
+
+        explicit DsCnnMFCC(const size_t numFeats, const size_t frameLen)
+            :  MFCC(MfccParams(
+                        ms_defaultSamplingFreq, ms_defaultNumFbankBins,
+                        ms_defaultMelLoFreq, ms_defaultMelHiFreq,
+                        numFeats, frameLen, ms_defaultUseHtkMethod))
+        {}
+        DsCnnMFCC()  = delete;
+        ~DsCnnMFCC() = default;
+    };
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_DSCNN_MFCC_HPP */
\ No newline at end of file
diff --git a/source/use_case/kws/include/DsCnnModel.hpp b/source/use_case/kws/include/DsCnnModel.hpp
new file mode 100644
index 0000000..a4e7110
--- /dev/null
+++ b/source/use_case/kws/include/DsCnnModel.hpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_DSCNNMODEL_HPP
+#define KWS_DSCNNMODEL_HPP
+
+#include "Model.hpp"
+
+extern const int g_FrameLength;
+extern const int g_FrameStride;
+extern const float g_ScoreThreshold;
+
+namespace arm {
+namespace app {
+
+    class DsCnnModel : public Model {
+    public:
+        /* Indices for the expected model - based on input and output tensor shapes */
+        static constexpr uint32_t ms_inputRowsIdx = 2;
+        static constexpr uint32_t ms_inputColsIdx = 3;
+        static constexpr uint32_t ms_outputRowsIdx = 2;
+        static constexpr uint32_t ms_outputColsIdx = 3;
+
+    protected:
+        /** @brief   Gets the reference to op resolver interface class. */
+        const tflite::MicroOpResolver& GetOpResolver() override;
+
+        /** @brief   Adds operations to the op resolver instance. */
+        bool EnlistOperations() override;
+
+        const uint8_t* ModelPointer() override;
+
+        size_t ModelSize() override;
+
+    private:
+        /* Maximum number of individual operations that can be enlisted. */
+        static constexpr int _ms_maxOpCnt = 8;
+
+        /* A mutable op resolver instance. */
+        tflite::MicroMutableOpResolver<_ms_maxOpCnt> _m_opResolver;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_DSCNNMODEL_HPP */
diff --git a/source/use_case/kws/include/KwsResult.hpp b/source/use_case/kws/include/KwsResult.hpp
new file mode 100644
index 0000000..5a26ce1
--- /dev/null
+++ b/source/use_case/kws/include/KwsResult.hpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_RESULT_HPP
+#define KWS_RESULT_HPP
+
+#include "ClassificationResult.hpp"
+
+#include <vector>
+
+namespace arm {
+namespace app {
+namespace kws {
+
+    using ResultVec = std::vector < arm::app::ClassificationResult >;
+
+    /* Structure for holding kws result. */
+    class KwsResult {
+
+    public:
+        ResultVec       m_resultVec;        /* Container for "thresholded" classification results. */
+        float           m_timeStamp;        /* Audio timestamp for this result. */
+        uint32_t        m_inferenceNumber;  /* Corresponding inference number. */
+        float           m_threshold;        /* Threshold value for `m_resultVec`. */
+
+        KwsResult() = delete;
+        KwsResult(ResultVec&        resultVec,
+                  const float       timestamp,
+                  const uint32_t    inferenceIdx,
+                  const float       scoreThreshold) {
+
+            this->m_threshold = scoreThreshold;
+            this->m_timeStamp = timestamp;
+            this->m_inferenceNumber = inferenceIdx;
+
+            this->m_resultVec = ResultVec();
+            for (auto & i : resultVec) {
+                if (i.m_normalisedVal >= this->m_threshold) {
+                    this->m_resultVec.emplace_back(i);
+                }
+            }
+        }
+        ~KwsResult() = default;
+    };
+
+} /* namespace kws */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_RESULT_HPP */
\ No newline at end of file
diff --git a/source/use_case/kws/include/UseCaseHandler.hpp b/source/use_case/kws/include/UseCaseHandler.hpp
new file mode 100644
index 0000000..1eb742f
--- /dev/null
+++ b/source/use_case/kws/include/UseCaseHandler.hpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_EVT_HANDLER_HPP
+#define KWS_EVT_HANDLER_HPP
+
+#include "AppContext.hpp"
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief       Handles the inference event.
+     * @param[in]   ctx         Reference to the application context.
+     * @param[in]   clipIndex   Index of the audio clip to classify.
+     * @param[in]   runAll      Flag to request classification of all the available audio clips.
+     * @return      true or false based on execution success.
+     **/
+    bool ClassifyAudioHandler(ApplicationContext& ctx, uint32_t clipIndex, bool runAll);
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_EVT_HANDLER_HPP */
\ No newline at end of file
diff --git a/source/use_case/kws/src/DsCnnModel.cc b/source/use_case/kws/src/DsCnnModel.cc
new file mode 100644
index 0000000..a093eb4
--- /dev/null
+++ b/source/use_case/kws/src/DsCnnModel.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnModel.hpp"
+
+#include "hal.h"
+
+const tflite::MicroOpResolver& arm::app::DsCnnModel::GetOpResolver()
+{
+    return this->_m_opResolver;
+}
+
+bool arm::app::DsCnnModel::EnlistOperations()
+{
+    this->_m_opResolver.AddReshape();
+    this->_m_opResolver.AddAveragePool2D();
+    this->_m_opResolver.AddConv2D();
+    this->_m_opResolver.AddDepthwiseConv2D();
+    this->_m_opResolver.AddFullyConnected();
+    this->_m_opResolver.AddRelu();
+    this->_m_opResolver.AddSoftmax();
+
+#if defined(ARM_NPU)
+    if (kTfLiteOk == this->_m_opResolver.AddEthosU()) {
+        info("Added %s support to op resolver\n",
+            tflite::GetString_ETHOSU());
+    } else {
+        printf_err("Failed to add Arm NPU support to op resolver.");
+        return false;
+    }
+#endif /* ARM_NPU */
+    return true;
+}
+
+extern uint8_t* GetModelPointer();
+const uint8_t* arm::app::DsCnnModel::ModelPointer()
+{
+    return GetModelPointer();
+}
+
+extern size_t GetModelLen();
+size_t arm::app::DsCnnModel::ModelSize()
+{
+    return GetModelLen();
+}
\ No newline at end of file
diff --git a/source/use_case/kws/src/MainLoop.cc b/source/use_case/kws/src/MainLoop.cc
new file mode 100644
index 0000000..24cb939
--- /dev/null
+++ b/source/use_case/kws/src/MainLoop.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "InputFiles.hpp"           /* For input audio clips. */
+#include "Classifier.hpp"           /* Classifier. */
+#include "DsCnnModel.hpp"           /* Model class for running inference. */
+#include "hal.h"                    /* Brings in platform definitions. */
+#include "Labels.hpp"               /* For label strings. */
+#include "UseCaseHandler.hpp"       /* Handlers for different user options. */
+#include "UseCaseCommonUtils.hpp"   /* Utils functions. */
+
+using KwsClassifier = arm::app::Classifier;
+
+enum opcodes
+{
+    MENU_OPT_RUN_INF_NEXT = 1,       /* Run on next vector. */
+    MENU_OPT_RUN_INF_CHOSEN,         /* Run on a user provided vector index. */
+    MENU_OPT_RUN_INF_ALL,            /* Run inference on all. */
+    MENU_OPT_SHOW_MODEL_INFO,        /* Show model info. */
+    MENU_OPT_LIST_AUDIO_CLIPS        /* List the current baked audio clips. */
+};
+
+static void DisplayMenu()
+{
+    printf("\n\nUser input required\n");
+    printf("Enter option number from:\n\n");
+    printf("  %u. Classify next audio clip\n", MENU_OPT_RUN_INF_NEXT);
+    printf("  %u. Classify audio clip at chosen index\n", MENU_OPT_RUN_INF_CHOSEN);
+    printf("  %u. Run classification on all audio clips\n", MENU_OPT_RUN_INF_ALL);
+    printf("  %u. Show NN model info\n", MENU_OPT_SHOW_MODEL_INFO);
+    printf("  %u. List audio clips\n\n", MENU_OPT_LIST_AUDIO_CLIPS);
+    printf("  Choice: ");
+}
+
+void main_loop(hal_platform& platform)
+{
+    arm::app::DsCnnModel model;  /* Model wrapper object. */
+
+    /* Load the model. */
+    if (!model.Init()) {
+        printf_err("Failed to initialise model\n");
+        return;
+    }
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("model", model);
+    caseContext.Set<uint32_t>("clipIndex", 0);
+    caseContext.Set<int>("frameLength", g_FrameLength);
+    caseContext.Set<int>("frameStride", g_FrameStride);
+    caseContext.Set<float>("scoreThreshold", g_ScoreThreshold);  /* Normalised score threshold. */
+
+    KwsClassifier classifier;  /* classifier wrapper object. */
+    caseContext.Set<arm::app::Classifier&>("classifier", classifier);
+
+    std::vector <std::string> labels;
+    GetLabelsVector(labels);
+
+    caseContext.Set<const std::vector <std::string>&>("labels", labels);
+
+    bool executionSuccessful = true;
+    constexpr bool bUseMenu = NUMBER_OF_FILES > 1;
+
+    /* Loop. */
+    do {
+        int menuOption = MENU_OPT_RUN_INF_NEXT;
+        if (bUseMenu) {
+            DisplayMenu();
+            menuOption = arm::app::ReadUserInputAsInt(platform);
+            printf("\n");
+        }
+        switch (menuOption) {
+            case MENU_OPT_RUN_INF_NEXT:
+                executionSuccessful = ClassifyAudioHandler(caseContext, caseContext.Get<uint32_t>("clipIndex"), false);
+                break;
+            case MENU_OPT_RUN_INF_CHOSEN: {
+                printf("    Enter the audio clip index [0, %d]: ", NUMBER_OF_FILES-1);
+                auto clipIndex = static_cast<uint32_t>(arm::app::ReadUserInputAsInt(platform));
+                executionSuccessful = ClassifyAudioHandler(caseContext, clipIndex, false);
+                break;
+            }
+            case MENU_OPT_RUN_INF_ALL:
+                executionSuccessful = ClassifyAudioHandler(caseContext, caseContext.Get<uint32_t>("clipIndex"), true);
+                break;
+            case MENU_OPT_SHOW_MODEL_INFO:
+                executionSuccessful = model.ShowModelInfoHandler();
+                break;
+            case MENU_OPT_LIST_AUDIO_CLIPS:
+                executionSuccessful = ListFilesHandler(caseContext);
+                break;
+            default:
+                printf("Incorrect choice, try again.");
+                break;
+        }
+    } while (executionSuccessful && bUseMenu);
+    info("Main loop terminated.\n");
+}
\ No newline at end of file
diff --git a/source/use_case/kws/src/UseCaseHandler.cc b/source/use_case/kws/src/UseCaseHandler.cc
new file mode 100644
index 0000000..872d323
--- /dev/null
+++ b/source/use_case/kws/src/UseCaseHandler.cc
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "UseCaseHandler.hpp"
+
+#include "InputFiles.hpp"
+#include "Classifier.hpp"
+#include "DsCnnModel.hpp"
+#include "hal.h"
+#include "DsCnnMfcc.hpp"
+#include "AudioUtils.hpp"
+#include "UseCaseCommonUtils.hpp"
+#include "KwsResult.hpp"
+
+#include <vector>
+#include <functional>
+
+using KwsClassifier = arm::app::Classifier;
+
+namespace arm {
+namespace app {
+
+    /**
+    * @brief            Helper function to increment current audio clip index.
+    * @param[in,out]    ctx   Reference to the application context object.
+    **/
+    static void _IncrementAppCtxClipIdx(ApplicationContext& ctx);
+
+    /**
+     * @brief           Helper function to set the audio clip index.
+     * @param[in,out]   ctx   Reference to the application context object.
+     * @param[in]       idx   Value to be set.
+     * @return          true if index is set, false otherwise.
+     **/
+    static bool _SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx);
+
+    /**
+     * @brief           Presents inference results using the data presentation
+     *                  object.
+     * @param[in]       platform    Reference to the hal platform object.
+     * @param[in]       results     Vector of classification results to be displayed.
+     * @return          true if successful, false otherwise.
+     **/
+    static bool _PresentInferenceResult(hal_platform& platform,
+                                        const std::vector<arm::app::kws::KwsResult>& results);
+
+    /**
+     * @brief Returns a function that performs feature calculation and populates the input
+     * tensor with MFCC data.
+     *
+     * The input tensor's data type is checked to choose the correct MFCC feature data type.
+     * If the tensor has an integer data type, the original features are quantised.
+     *
+     * Warning: the MFCC calculator provided as input must outlive the returned function.
+     *
+     * @param[in]       mfcc          MFCC feature calculator.
+     * @param[in,out]   inputTensor   Input tensor pointer to store calculated features.
+     * @param[in]       cacheSize     Size of the feature vectors cache (number of feature vectors).
+     * @return          Function to be called providing audio sample and sliding window index.
+     */
+    static std::function<void (std::vector<int16_t>&, int, bool, size_t)>
+            GetFeatureCalculator(audio::DsCnnMFCC&  mfcc,
+                                 TfLiteTensor*      inputTensor,
+                                 size_t             cacheSize);
+
+    /* Audio inference handler. */
+    bool ClassifyAudioHandler(ApplicationContext& ctx, uint32_t clipIndex, bool runAll)
+    {
+        auto& platform = ctx.Get<hal_platform&>("platform");
+
+        constexpr uint32_t dataPsnTxtInfStartX = 20;
+        constexpr uint32_t dataPsnTxtInfStartY = 40;
+        constexpr int minTensorDims = static_cast<int>(
+            (arm::app::DsCnnModel::ms_inputRowsIdx > arm::app::DsCnnModel::ms_inputColsIdx)?
+             arm::app::DsCnnModel::ms_inputRowsIdx : arm::app::DsCnnModel::ms_inputColsIdx);
+
+        platform.data_psn->clear(COLOR_BLACK);
+
+        auto& model = ctx.Get<Model&>("model");
+
+        /* If the request has a valid size, set the audio index. */
+        if (clipIndex < NUMBER_OF_FILES) {
+            if (!_SetAppCtxClipIdx(ctx, clipIndex)) {
+                return false;
+            }
+        }
+        if (!model.IsInited()) {
+            printf_err("Model is not initialised! Terminating processing.\n");
+            return false;
+        }
+
+        const auto frameLength = ctx.Get<int>("frameLength");
+        const auto frameStride = ctx.Get<int>("frameStride");
+        const auto scoreThreshold = ctx.Get<float>("scoreThreshold");
+        auto startClipIdx = ctx.Get<uint32_t>("clipIndex");
+
+        TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+        TfLiteTensor* inputTensor = model.GetInputTensor(0);
+
+        if (!inputTensor->dims) {
+            printf_err("Invalid input tensor dims\n");
+            return false;
+        } else if (inputTensor->dims->size < minTensorDims) {
+            printf_err("Input tensor dimension should be >= %d\n", minTensorDims);
+            return false;
+        }
+
+        TfLiteIntArray* inputShape = model.GetInputShape(0);
+        const uint32_t kNumCols = inputShape->data[arm::app::DsCnnModel::ms_inputColsIdx];
+        const uint32_t kNumRows = inputShape->data[arm::app::DsCnnModel::ms_inputRowsIdx];
+
+        audio::DsCnnMFCC mfcc = audio::DsCnnMFCC(kNumCols, frameLength);
+        mfcc.Init();
+
+        /* Deduce the data length required for 1 inference from the network parameters. */
+        auto audioDataWindowSize = kNumRows * frameStride + (frameLength - frameStride);
+        auto mfccWindowSize = frameLength;
+        auto mfccWindowStride = frameStride;
+
+        /* We choose to move by half the window size => for a 1 second window size
+         * there is an overlap of 0.5 seconds. */
+        auto audioDataStride = audioDataWindowSize / 2;
+
+        /* To have the previously calculated features re-usable, stride must be multiple
+         * of MFCC features window stride. */
+        if (0 != audioDataStride % mfccWindowStride) {
+
+            /* Reduce the stride. */
+            audioDataStride -= audioDataStride % mfccWindowStride;
+        }
+
+        auto nMfccVectorsInAudioStride = audioDataStride/mfccWindowStride;
+
+        /* We expect to be sampling 1 second worth of data at a time.
+         * NOTE: This is only used for time stamp calculation. */
+        const float secondsPerSample = 1.0/audio::DsCnnMFCC::ms_defaultSamplingFreq;
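+
+        /* Worked example, assuming the default DS-CNN settings (frameLength = 640,
+         * frameStride = 320, 49 input rows at 16 kHz):
+         *   audioDataWindowSize       = 49 * 320 + (640 - 320) = 16000 samples (1 second),
+         *   audioDataStride           = 16000 / 2 = 8000 (already a multiple of 320),
+         *   nMfccVectorsInAudioStride = 8000 / 320 = 25 feature vectors per stride. */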
+
+        do {
+            auto currentIndex = ctx.Get<uint32_t>("clipIndex");
+
+            /* Creating a mfcc features sliding window for the data required for 1 inference. */
+            auto audioMFCCWindowSlider = audio::SlidingWindow<const int16_t>(
+                                            get_audio_array(currentIndex),
+                                            audioDataWindowSize, mfccWindowSize,
+                                            mfccWindowStride);
+
+            /* Creating a sliding window through the whole audio clip. */
+            auto audioDataSlider = audio::SlidingWindow<const int16_t>(
+                                        get_audio_array(currentIndex),
+                                        get_audio_array_size(currentIndex),
+                                        audioDataWindowSize, audioDataStride);
+
+            /* Calculate the number of feature vectors in the window overlap region.
+             * These feature vectors will be reused. */
+            auto numberOfReusedFeatureVectors = audioMFCCWindowSlider.TotalStrides() + 1
+                                                - nMfccVectorsInAudioStride;
+
+            /* Construct feature calculation function. */
+            auto mfccFeatureCalc = GetFeatureCalculator(mfcc, inputTensor,
+                                                        numberOfReusedFeatureVectors);
+
+            if (!mfccFeatureCalc){
+                return false;
+            }
+
+            /* Declare a container for results. */
+            std::vector<arm::app::kws::KwsResult> results;
+
+            /* Display message on the LCD - inference running. */
+            std::string str_inf{"Running inference... "};
+            platform.data_psn->present_data_text(
+                                str_inf.c_str(), str_inf.size(),
+                                dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+            info("Running inference on audio clip %u => %s\n", currentIndex,
+                 get_filename(currentIndex));
+
+            /* Start sliding through audio clip. */
+            while (audioDataSlider.HasNext()) {
+                const int16_t *inferenceWindow = audioDataSlider.Next();
+
+                /* We moved to the next window - set the features sliding to the new address. */
+                audioMFCCWindowSlider.Reset(inferenceWindow);
+
+                /* The first window does not have cache ready. */
+                bool useCache = audioDataSlider.Index() > 0 && numberOfReusedFeatureVectors > 0;
+
+                /* Start calculating features inside one audio sliding window. */
+                while (audioMFCCWindowSlider.HasNext()) {
+                    const int16_t *mfccWindow = audioMFCCWindowSlider.Next();
+                    std::vector<int16_t> mfccAudioData = std::vector<int16_t>(mfccWindow,
+                                                            mfccWindow + mfccWindowSize);
+                    /* Compute features for this window and write them to input tensor. */
+                    mfccFeatureCalc(mfccAudioData,
+                                    audioMFCCWindowSlider.Index(),
+                                    useCache,
+                                    nMfccVectorsInAudioStride);
+                }
+
+                info("Inference %zu/%zu\n", audioDataSlider.Index() + 1,
+                     audioDataSlider.TotalStrides() + 1);
+
+                /* Run inference over this audio clip sliding window. */
+                arm::app::RunInference(platform, model);
+
+                std::vector<ClassificationResult> classificationResult;
+                auto& classifier = ctx.Get<KwsClassifier&>("classifier");
+                classifier.GetClassificationResults(outputTensor, classificationResult,
+                                                    ctx.Get<std::vector<std::string>&>("labels"), 1);
+
+                results.emplace_back(kws::KwsResult(classificationResult,
+                    audioDataSlider.Index() * secondsPerSample * audioDataStride,
+                    audioDataSlider.Index(), scoreThreshold));
+
+#if VERIFY_TEST_OUTPUT
+                arm::app::DumpTensor(outputTensor);
+#endif /* VERIFY_TEST_OUTPUT */
+            } /* while (audioDataSlider.HasNext()) */
+
+            /* Erase. */
+            str_inf = std::string(str_inf.size(), ' ');
+            platform.data_psn->present_data_text(
+                                str_inf.c_str(), str_inf.size(),
+                                dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);
+
+            ctx.Set<std::vector<arm::app::kws::KwsResult>>("results", results);
+
+            if (!_PresentInferenceResult(platform, results)) {
+                return false;
+            }
+
+            _IncrementAppCtxClipIdx(ctx);
+
+        } while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);
+
+        return true;
+    }
+
+    static void _IncrementAppCtxClipIdx(ApplicationContext& ctx)
+    {
+        auto curAudioIdx = ctx.Get<uint32_t>("clipIndex");
+
+        if (curAudioIdx + 1 >= NUMBER_OF_FILES) {
+            ctx.Set<uint32_t>("clipIndex", 0);
+            return;
+        }
+        ++curAudioIdx;
+        ctx.Set<uint32_t>("clipIndex", curAudioIdx);
+    }
+
+    static bool _SetAppCtxClipIdx(ApplicationContext& ctx, const uint32_t idx)
+    {
+        if (idx >= NUMBER_OF_FILES) {
+            printf_err("Invalid idx %u (expected less than %u)\n",
+                       idx, NUMBER_OF_FILES);
+            return false;
+        }
+        ctx.Set<uint32_t>("clipIndex", idx);
+        return true;
+    }
+
+    static bool _PresentInferenceResult(hal_platform& platform,
+                                        const std::vector<arm::app::kws::KwsResult>& results)
+    {
+        constexpr uint32_t dataPsnTxtStartX1 = 20;
+        constexpr uint32_t dataPsnTxtStartY1 = 30;
+        constexpr uint32_t dataPsnTxtYIncr   = 16;  /* Row index increment. */
+
+        platform.data_psn->set_text_color(COLOR_GREEN);
+
+        /* Display each result */
+        uint32_t rowIdx1 = dataPsnTxtStartY1 + 2 * dataPsnTxtYIncr;
+
+        for (uint32_t i = 0; i < results.size(); ++i) {
+
+            std::string topKeyword{"<none>"};
+            float score = 0.f;
+
+            if (results[i].m_resultVec.size()) {
+                topKeyword = results[i].m_resultVec[0].m_label;
+                score = results[i].m_resultVec[0].m_normalisedVal;
+            }
+
+            std::string resultStr =
+                std::string{"@"} + std::to_string(results[i].m_timeStamp) +
+                std::string{"s: "} + topKeyword + std::string{" ("} +
+                std::to_string(static_cast<int>(score * 100)) + std::string{"%)"};
+
+            platform.data_psn->present_data_text(
+                                    resultStr.c_str(), resultStr.size(),
+                                    dataPsnTxtStartX1, rowIdx1, false);
+            rowIdx1 += dataPsnTxtYIncr;
+
+            info("For timestamp: %f (inference #: %u); threshold: %f\n",
+                    results[i].m_timeStamp, results[i].m_inferenceNumber,
+                    results[i].m_threshold);
+            for (uint32_t j = 0; j < results[i].m_resultVec.size(); ++j) {
+                info("\t\tlabel @ %u: %s, score: %f\n", j,
+                    results[i].m_resultVec[j].m_label.c_str(),
+                    results[i].m_resultVec[j].m_normalisedVal);
+            }
+        }
+
+        return true;
+    }
+
+    /**
+     * @brief Generic feature calculator factory.
+     *
+     * Returns a lambda function that computes features using a feature cache.
+     * The actual feature computation is done by the lambda function provided as a parameter.
+     * Features are written to the input tensor memory.
+     *
+     * @tparam T            Feature vector type.
+     * @param inputTensor   Model input tensor pointer.
+     * @param cacheSize     Number of feature vectors to cache. Defined by the sliding window overlap.
+     * @param compute       Features calculator function.
+     * @return              Lambda function to compute features.
+     */
+    template<class T>
+    std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>
+    _FeatureCalc(TfLiteTensor* inputTensor, size_t cacheSize,
+                 std::function<std::vector<T> (std::vector<int16_t>& )> compute)
+    {
+        /* Feature cache to be captured by lambda function. */
+        static std::vector<std::vector<T>> featureCache = std::vector<std::vector<T>>(cacheSize);
+
+        return [=](std::vector<int16_t>& audioDataWindow,
+                                     size_t index,
+                                     bool useCache,
+                                     size_t featuresOverlapIndex)
+        {
+            T *tensorData = tflite::GetTensorData<T>(inputTensor);
+            std::vector<T> features;
+
+            /* Reuse features from the cache if the cache is ready and the sliding windows overlap.
+             * The overlap is at the beginning of the sliding window and its size equals the feature cache size. */
+            if (useCache && index < featureCache.size()) {
+                features = std::move(featureCache[index]);
+            } else {
+                features = std::move(compute(audioDataWindow));
+            }
+            auto size = features.size();
+            auto sizeBytes = sizeof(T) * size;
+            std::memcpy(tensorData + (index * size), features.data(), sizeBytes);
+
+            /* Start renewing the cache as soon as the iteration goes past the windows' overlap. */
+            if (index >= featuresOverlapIndex) {
+                featureCache[index - featuresOverlapIndex] = std::move(features);
+            }
+        };
+    }
+
+    template std::function<void (std::vector<int16_t>&, size_t , bool, size_t)>
+        _FeatureCalc<int8_t>(TfLiteTensor* inputTensor,
+                            size_t cacheSize,
+                            std::function<std::vector<int8_t> (std::vector<int16_t>& )> compute);
+
+    template std::function<void (std::vector<int16_t>&, size_t , bool, size_t)>
+        _FeatureCalc<uint8_t>(TfLiteTensor* inputTensor,
+                              size_t cacheSize,
+                              std::function<std::vector<uint8_t> (std::vector<int16_t>& )> compute);
+
+    template std::function<void (std::vector<int16_t>&, size_t , bool, size_t)>
+        _FeatureCalc<int16_t>(TfLiteTensor* inputTensor,
+                              size_t cacheSize,
+                              std::function<std::vector<int16_t> (std::vector<int16_t>& )> compute);
+
+    template std::function<void(std::vector<int16_t>&, size_t, bool, size_t)>
+        _FeatureCalc<float>(TfLiteTensor *inputTensor,
+                            size_t cacheSize,
+                            std::function<std::vector<float>(std::vector<int16_t>&)> compute);
+
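A minimal, self-contained sketch of the caching protocol used by the returned lambda, with a plain float buffer standing in for the TfLite input tensor (all names and sizes below are illustrative, not taken from the application):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <functional>
    #include <memory>
    #include <utility>
    #include <vector>

    /* Same index/useCache/featuresOverlapIndex protocol as _FeatureCalc, but
     * writing into a plain buffer instead of a tensor. */
    static std::function<void(std::vector<int16_t>&, size_t, bool, size_t)>
    MakeFeatureCalc(std::vector<float>& out, size_t cacheSize,
                    std::function<std::vector<float>(std::vector<int16_t>&)> compute)
    {
        auto cache = std::make_shared<std::vector<std::vector<float>>>(cacheSize);
        return [&out, cache, compute](std::vector<int16_t>& window, size_t index,
                                      bool useCache, size_t featuresOverlapIndex) {
            std::vector<float> feats = (useCache && index < cache->size())
                                           ? std::move((*cache)[index])
                                           : compute(window);
            std::memcpy(out.data() + index * feats.size(), feats.data(),
                        feats.size() * sizeof(float));
            /* Windows past the overlap index feed the cache for the next stride. */
            if (index >= featuresOverlapIndex) {
                (*cache)[index - featuresOverlapIndex] = std::move(feats);
            }
        };
    }

    int main()
    {
        constexpr size_t numWindows    = 5;   /* MFCC windows per inference.      */
        constexpr size_t vecsPerStride = 3;   /* New windows per audio stride.    */
        constexpr size_t numFeats      = 10;  /* Features per window (arbitrary). */

        size_t computeCalls = 0;
        std::vector<float> inputBuf(numWindows * numFeats);
        auto calc = MakeFeatureCalc(inputBuf, numWindows - vecsPerStride,
            [&](std::vector<int16_t>&) {
                ++computeCalls;
                return std::vector<float>(numFeats, 0.f);
            });

        std::vector<int16_t> window(640, 0);
        for (size_t stride = 0; stride < 2; ++stride) {
            const bool useCache = stride > 0;   /* No cache for the first stride. */
            for (size_t i = 0; i < numWindows; ++i) {
                calc(window, i, useCache, vecsPerStride);
            }
        }
        /* 5 computations for the first stride + 3 for the second = 8. */
        std::printf("compute() called %zu times\n", computeCalls);
        return 0;
    }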
+
+    static std::function<void (std::vector<int16_t>&, int, bool, size_t)>
+    GetFeatureCalculator(audio::DsCnnMFCC& mfcc, TfLiteTensor* inputTensor, size_t cacheSize)
+    {
+        std::function<void (std::vector<int16_t>&, size_t, bool, size_t)> mfccFeatureCalc;
+
+        TfLiteQuantization quant = inputTensor->quantization;
+
+        if (kTfLiteAffineQuantization == quant.type) {
+
+            auto *quantParams = (TfLiteAffineQuantization *) quant.params;
+            const float quantScale = quantParams->scale->data[0];
+            const int quantOffset = quantParams->zero_point->data[0];
+
+            switch (inputTensor->type) {
+                case kTfLiteInt8: {
+                    mfccFeatureCalc = _FeatureCalc<int8_t>(inputTensor,
+                                                           cacheSize,
+                                                           [=, &mfcc](std::vector<int16_t>& audioDataWindow) {
+                                                               return mfcc.MfccComputeQuant<int8_t>(audioDataWindow,
+                                                                                                    quantScale,
+                                                                                                    quantOffset);
+                                                           }
+                    );
+                    break;
+                }
+                case kTfLiteUInt8: {
+                    mfccFeatureCalc = _FeatureCalc<uint8_t>(inputTensor,
+                                                            cacheSize,
+                                                           [=, &mfcc](std::vector<int16_t>& audioDataWindow) {
+                                                               return mfcc.MfccComputeQuant<uint8_t>(audioDataWindow,
+                                                                                                     quantScale,
+                                                                                                     quantOffset);
+                                                           }
+                    );
+                    break;
+                }
+                case kTfLiteInt16: {
+                    mfccFeatureCalc = _FeatureCalc<int16_t>(inputTensor,
+                                                            cacheSize,
+                                                            [=, &mfcc](std::vector<int16_t>& audioDataWindow) {
+                                                                return mfcc.MfccComputeQuant<int16_t>(audioDataWindow,
+                                                                                                      quantScale,
+                                                                                                      quantOffset);
+                                                            }
+                    );
+                    break;
+                }
+                default:
+                    printf_err("Tensor type %s not supported\n", TfLiteTypeGetName(inputTensor->type));
+            }
+
+
+        } else {
+            mfccFeatureCalc = _FeatureCalc<float>(inputTensor,
+                                                  cacheSize,
+                                                  [&mfcc](std::vector<int16_t>& audioDataWindow) {
+                                                      return mfcc.MfccCompute(audioDataWindow);
+                                                  });
+        }
+        return mfccFeatureCalc;
+    }
+
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/use_case/kws/usecase.cmake b/source/use_case/kws/usecase.cmake
new file mode 100644
index 0000000..b5ac09e
--- /dev/null
+++ b/source/use_case/kws/usecase.cmake
@@ -0,0 +1,159 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# If the path to a directory or source file has been defined,
+# get the type here (FILEPATH or PATH):
+if (DEFINED ${use_case}_FILE_PATH)
+    get_path_type(${${use_case}_FILE_PATH} PATH_TYPE)
+
+    # Set the default type if path is not a dir or file path (or undefined)
+    if (NOT ${PATH_TYPE} STREQUAL PATH AND NOT ${PATH_TYPE} STREQUAL FILEPATH)
+        message(FATAL_ERROR "Invalid ${use_case}_FILE_PATH. It should be a dir or file path.")
+    endif()
+else()
+    # Default is a directory path
+    set(PATH_TYPE PATH)
+endif()
+
+message(STATUS "${use_case}_FILE_PATH is of type: ${PATH_TYPE}")
+USER_OPTION(${use_case}_FILE_PATH "Directory with custom WAV input files, or path to a single WAV file, to use in the evaluation application."
+    ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
+    ${PATH_TYPE})
+
+USER_OPTION(${use_case}_LABELS_TXT_FILE "Labels' txt file for the chosen model."
+    ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/labels/ds_cnn_labels.txt
+    FILEPATH)
+
+USER_OPTION(${use_case}_AUDIO_RATE "Specify the target sampling rate. Default is 16000."
+    16000
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_MONO "Specify if the audio needs to be converted to mono. Default is ON."
+    ON
+    BOOL)
+
+USER_OPTION(${use_case}_AUDIO_OFFSET "Specify the offset, in seconds, from which to start reading the audio. Default is 0."
+    0
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_DURATION "Specify the audio duration to load (in seconds). If set to 0 the entire audio will be processed."
+    0
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_RES_TYPE "Specify the re-sampling algorithm to use. Default is 'kaiser_best'."
+    kaiser_best
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_MIN_SAMPLES "Specify the minimum number of samples to use. Default is 16000; shorter audio will be automatically padded."
+    16000
+    STRING)
+
+USER_OPTION(${use_case}_MODEL_SCORE_THRESHOLD "Specify the score threshold [0.0, 1.0) that must be applied to the inference results for a label to be deemed valid."
+    0.9
+    STRING)
+
+# Generate input files
+generate_audio_code(${${use_case}_FILE_PATH} ${SRC_GEN_DIR} ${INC_GEN_DIR}
+    ${${use_case}_AUDIO_RATE}
+    ${${use_case}_AUDIO_MONO}
+    ${${use_case}_AUDIO_OFFSET}
+    ${${use_case}_AUDIO_DURATION}
+    ${${use_case}_AUDIO_RES_TYPE}
+    ${${use_case}_AUDIO_MIN_SAMPLES})
+
+# Generate labels file
+set(${use_case}_LABELS_CPP_FILE Labels)
+generate_labels_code(
+    INPUT           "${${use_case}_LABELS_TXT_FILE}"
+    DESTINATION_SRC ${SRC_GEN_DIR}
+    DESTINATION_HDR ${INC_GEN_DIR}
+    OUTPUT_FILENAME "${${use_case}_LABELS_CPP_FILE}"
+)
+
+USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen model"
+    0x00100000
+    STRING)
+
+# If there is no tflite file pointed to
+if (NOT DEFINED ${use_case}_MODEL_TFLITE_PATH)
+
+    set(MODEL_FILENAME          ds_cnn_clustered_int8.tflite)
+    set(MODEL_RESOURCES_DIR     ${DOWNLOAD_DEP_DIR}/${use_case})
+    file(MAKE_DIRECTORY         ${MODEL_RESOURCES_DIR})
+    set(DEFAULT_MODEL_PATH      ${MODEL_RESOURCES_DIR}/${MODEL_FILENAME})
+
+    # Download the default model
+    set(ZOO_COMMON_SUBPATH      "models/keyword_spotting/ds_cnn_large/tflite_clustered_int8")
+    set(ZOO_MODEL_SUBPATH       "${ZOO_COMMON_SUBPATH}/${MODEL_FILENAME}")
+
+    download_file_from_modelzoo(${ZOO_MODEL_SUBPATH}    ${DEFAULT_MODEL_PATH})
+
+    if (ETHOS_U55_ENABLED)
+        message(STATUS
+            "Ethos-U55 is enabled, but the model downloaded is not optimized by vela. "
+            "To use Ethos-U55 acceleration, optimise the downloaded model and pass it "
+            "as ${use_case}_MODEL_TFLITE_PATH to the CMake configuration.")
+    endif()
+
+    # If the target platform is native
+    if (${TARGET_PLATFORM} STREQUAL native)
+
+        # Download test vectors
+        set(ZOO_TEST_IFM_SUBPATH    "${ZOO_COMMON_SUBPATH}/testing_input/input_2/0.npy")
+        set(ZOO_TEST_OFM_SUBPATH    "${ZOO_COMMON_SUBPATH}/testing_output/Identity/0.npy")
+
+        set(${use_case}_TEST_IFM    ${MODEL_RESOURCES_DIR}/ifm0.npy CACHE FILEPATH
+                                    "Input test vector for ${use_case}")
+        set(${use_case}_TEST_OFM    ${MODEL_RESOURCES_DIR}/ofm0.npy CACHE FILEPATH
+                                    "Input test vector for ${use_case}")
+
+        download_file_from_modelzoo(${ZOO_TEST_IFM_SUBPATH} ${${use_case}_TEST_IFM})
+        download_file_from_modelzoo(${ZOO_TEST_OFM_SUBPATH} ${${use_case}_TEST_OFM})
+
+        set(TEST_SRC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/src)
+        set(TEST_INC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/include)
+        file(MAKE_DIRECTORY ${TEST_SRC_GEN_DIR} ${TEST_INC_GEN_DIR})
+
+        # Generate test data files to be included in x86 tests
+        generate_test_data_code(
+                            INPUT_DIR "${DOWNLOAD_DEP_DIR}/${use_case}"
+                            DESTINATION_SRC ${TEST_SRC_GEN_DIR}
+                            DESTINATION_HDR ${TEST_INC_GEN_DIR}
+                            USECASE  "${use_case}")
+    endif()
+
+else()
+    set(DEFAULT_MODEL_PATH  "N/A")
+endif()
+
+set(EXTRA_MODEL_CODE
+    "/* Model parameters for ${use_case} */"
+    "extern const int   g_FrameLength    = 640"
+    "extern const int   g_FrameStride    = 320"
+    "extern const float g_ScoreThreshold = ${${use_case}_MODEL_SCORE_THRESHOLD}"
+    )
+
+USER_OPTION(${use_case}_MODEL_TFLITE_PATH "NN model file to be used in the evaluation application. Model files must be in tflite format."
+    ${DEFAULT_MODEL_PATH}
+    FILEPATH)
+
+# Generate model file
+generate_tflite_code(
+    MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+    DESTINATION ${SRC_GEN_DIR}
+    EXPRESSIONS ${EXTRA_MODEL_CODE}
+)
diff --git a/source/use_case/kws_asr/include/AsrClassifier.hpp b/source/use_case/kws_asr/include/AsrClassifier.hpp
new file mode 100644
index 0000000..de18aa8
--- /dev/null
+++ b/source/use_case/kws_asr/include/AsrClassifier.hpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ASR_CLASSIFIER_HPP
+#define ASR_CLASSIFIER_HPP
+
+#include "Classifier.hpp"
+
+namespace arm {
+namespace app {
+
+    class AsrClassifier : public Classifier {
+    public:
+        /**
+         * @brief       Gets the top N classification results from the
+         *              output vector.
+         * @param[in]   outputTensor   Inference output tensor from an NN model.
+         * @param[out]  vecResults     A vector of classification results
+         *                             populated by this function.
+         * @param[in]   labels         Labels vector to match classified classes
+         * @param[in]   topNCount      Number of top classifications to pick.
+         * @return      true if successful, false otherwise.
+         **/
+        bool GetClassificationResults(
+                TfLiteTensor* outputTensor,
+                std::vector<ClassificationResult>& vecResults,
+                const std::vector <std::string>& labels, uint32_t topNCount) override;
+
+    private:
+
+        /**
+         * @brief       Utility function that gets the top-1 classification result for
+         *              each row of the output tensor (vector of vectors).
+         * @param[in]   tensor       Inference output tensor from an NN model.
+         * @param[out]  vecResults   A vector of classification results
+         *                           populated by this function.
+         * @param[in]   labels       Labels vector to match classified classes.
+         * @param[in]   scale        Quantization scale.
+         * @param[in]   zeroPoint    Quantization zero point.
+         * @return      true if successful, false otherwise.
+         **/
+        template<typename T>
+        bool _GetTopResults(TfLiteTensor* tensor,
+                            std::vector<ClassificationResult>& vecResults,
+                            const std::vector <std::string>& labels, double scale, double zeroPoint);
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* ASR_CLASSIFIER_HPP */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/include/AsrResult.hpp b/source/use_case/kws_asr/include/AsrResult.hpp
new file mode 100644
index 0000000..25fa9e8
--- /dev/null
+++ b/source/use_case/kws_asr/include/AsrResult.hpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ASR_RESULT_HPP
+#define ASR_RESULT_HPP
+
+#include "ClassificationResult.hpp"
+
+#include <vector>
+
+namespace arm {
+namespace app {
+namespace asr {
+
+    using ResultVec = std::vector<arm::app::ClassificationResult>;
+
+    /* Structure for holding asr result. */
+    class AsrResult {
+
+    public:
+        ResultVec       m_resultVec;        /* Container for "thresholded" classification results. */
+        float           m_timeStamp;        /* Audio timestamp for this result. */
+        uint32_t        m_inferenceNumber;  /* Corresponding inference number. */
+        float           m_threshold;        /* Threshold value for `m_resultVec` */
+
+        AsrResult() = delete;
+        AsrResult(ResultVec&        resultVec,
+                  const float       timestamp,
+                  const uint32_t    inferenceIdx,
+                  const float       scoreThreshold) {
+
+            this->m_threshold = scoreThreshold;
+            this->m_timeStamp = timestamp;
+            this->m_inferenceNumber = inferenceIdx;
+
+            this->m_resultVec = ResultVec();
+            for (auto& i : resultVec) {
+                if (i.m_normalisedVal >= this->m_threshold) {
+                    this->m_resultVec.emplace_back(i);
+                }
+            }
+        }
+        ~AsrResult() = default;
+    };
+
+} /* namespace asr */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* ASR_RESULT_HPP */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/include/DsCnnMfcc.hpp b/source/use_case/kws_asr/include/DsCnnMfcc.hpp
new file mode 100644
index 0000000..c97dd9d
--- /dev/null
+++ b/source/use_case/kws_asr/include/DsCnnMfcc.hpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_ASR_DSCNN_MFCC_HPP
+#define KWS_ASR_DSCNN_MFCC_HPP
+
+#include "Mfcc.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    /* Class to provide DS-CNN specific MFCC calculation requirements. */
+    class DsCnnMFCC : public MFCC {
+
+    public:
+        static constexpr uint32_t  ms_defaultSamplingFreq = 16000;
+        static constexpr uint32_t  ms_defaultNumFbankBins =    40;
+        static constexpr uint32_t  ms_defaultMelLoFreq    =    20;
+        static constexpr uint32_t  ms_defaultMelHiFreq    =  4000;
+        static constexpr bool      ms_defaultUseHtkMethod =  true;
+
+
+        explicit DsCnnMFCC(const size_t numFeats, const size_t frameLen)
+            :  MFCC(MfccParams(
+                        ms_defaultSamplingFreq, ms_defaultNumFbankBins,
+                        ms_defaultMelLoFreq, ms_defaultMelHiFreq,
+                        numFeats, frameLen, ms_defaultUseHtkMethod))
+        {}
+        DsCnnMFCC()  = delete;
+        ~DsCnnMFCC() = default;
+    };
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_ASR_DSCNN_MFCC_HPP */
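A construction sketch, assuming 10 MFCC coefficients per frame and the 640-sample frame length set by the KWS usecase.cmake in this patch (the application itself takes these values from kws::g_NumMfcc and kws::g_FrameLength; the values below are assumptions for illustration):

    #include "DsCnnMfcc.hpp"

    #include <cstdint>
    #include <vector>

    int main()
    {
        /* 10 MFCC features per 640-sample (40 ms @ 16 kHz) frame - assumed values. */
        arm::app::audio::DsCnnMFCC mfcc(10, 640);

        std::vector<int16_t> frame(640, 0);
        std::vector<float> feats = mfcc.MfccCompute(frame);  /* Expected: one vector of 10 coefficients. */
        return feats.size() == 10 ? 0 : 1;
    }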
diff --git a/source/use_case/kws_asr/include/DsCnnModel.hpp b/source/use_case/kws_asr/include/DsCnnModel.hpp
new file mode 100644
index 0000000..150a48c
--- /dev/null
+++ b/source/use_case/kws_asr/include/DsCnnModel.hpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_ASR_DSCNNMODEL_HPP
+#define KWS_ASR_DSCNNMODEL_HPP
+
+#include "Model.hpp"
+
+namespace arm {
+namespace app {
+namespace kws {
+    extern const int g_FrameLength;
+    extern const int g_FrameStride;
+    extern const float g_ScoreThreshold;
+    extern const uint32_t g_NumMfcc;
+    extern const uint32_t g_NumAudioWins;
+} /* namespace kws */
+} /* namespace app */
+} /* namespace arm */
+
+namespace arm {
+namespace app {
+
+    class DsCnnModel : public Model {
+    public:
+        /* Indices for the expected model - based on input and output tensor shapes */
+        static constexpr uint32_t ms_inputRowsIdx = 2;
+        static constexpr uint32_t ms_inputColsIdx = 3;
+        static constexpr uint32_t ms_outputRowsIdx = 2;
+        static constexpr uint32_t ms_outputColsIdx = 3;
+
+    protected:
+        /** @brief   Gets the reference to op resolver interface class. */
+        const tflite::MicroOpResolver& GetOpResolver() override;
+
+        /** @brief   Adds operations to the op resolver instance. */
+        bool EnlistOperations() override;
+
+        const uint8_t* ModelPointer() override;
+
+        size_t ModelSize() override;
+
+    private:
+        /* Maximum number of individual operations that can be enlisted. */
+        static constexpr int _ms_maxOpCnt = 10;
+
+        /* A mutable op resolver instance. */
+        tflite::MicroMutableOpResolver<_ms_maxOpCnt> _m_opResolver;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_ASR_DSCNNMODEL_HPP */
diff --git a/source/use_case/kws_asr/include/KwsResult.hpp b/source/use_case/kws_asr/include/KwsResult.hpp
new file mode 100644
index 0000000..45bb790
--- /dev/null
+++ b/source/use_case/kws_asr/include/KwsResult.hpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_RESULT_HPP
+#define KWS_RESULT_HPP
+
+#include "ClassificationResult.hpp"
+
+#include <vector>
+
+namespace arm {
+namespace app {
+namespace kws {
+
+    using ResultVec = std::vector<arm::app::ClassificationResult>;
+
+    /* Structure for holding kws result. */
+    class KwsResult {
+
+    public:
+        ResultVec       m_resultVec;        /* Container for "thresholded" classification results. */
+        float           m_timeStamp;        /* Audio timestamp for this result. */
+        uint32_t        m_inferenceNumber;  /* Corresponding inference number. */
+        float           m_threshold;        /* Threshold value for `m_resultVec.` */
+
+        KwsResult() = delete;
+        KwsResult(ResultVec&        resultVec,
+                  const float       timestamp,
+                  const uint32_t    inferenceIdx,
+                  const float       scoreThreshold) {
+
+            this->m_threshold = scoreThreshold;
+            this->m_timeStamp = timestamp;
+            this->m_inferenceNumber = inferenceIdx;
+
+            this->m_resultVec = ResultVec();
+            for (auto & i : resultVec) {
+                if (i.m_normalisedVal >= this->m_threshold) {
+                    this->m_resultVec.emplace_back(i);
+                }
+            }
+        }
+        ~KwsResult() = default;
+    };
+
+} /* namespace kws */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_RESULT_HPP */
\ No newline at end of file
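A short sketch of the thresholding performed by this constructor. It assumes arm::app::ClassificationResult is default-constructible with assignable m_label and m_normalisedVal members (as used elsewhere in this patch); all values are illustrative:

    #include "KwsResult.hpp"

    #include <cassert>

    int main()
    {
        /* Assumes ClassificationResult is default-constructible (as used elsewhere in the kit). */
        arm::app::ClassificationResult yes;
        yes.m_label = "yes";
        yes.m_normalisedVal = 0.95;

        arm::app::ClassificationResult unknown;
        unknown.m_label = "_unknown_";
        unknown.m_normalisedVal = 0.40;

        arm::app::kws::ResultVec raw{yes, unknown};

        /* Timestamp 0.5 s, inference #0, score threshold 0.9:
         * only the "yes" entry survives the filtering. */
        arm::app::kws::KwsResult result(raw, 0.5f, 0, 0.9f);
        assert(result.m_resultVec.size() == 1);
        return 0;
    }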
diff --git a/source/use_case/kws_asr/include/OutputDecode.hpp b/source/use_case/kws_asr/include/OutputDecode.hpp
new file mode 100644
index 0000000..2bbb29c
--- /dev/null
+++ b/source/use_case/kws_asr/include/OutputDecode.hpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_ASR_OUTPUT_DECODE_HPP
+#define KWS_ASR_OUTPUT_DECODE_HPP
+
+#include "AsrClassifier.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+namespace asr {
+
+    /**
+     * @brief       Decodes the classifier output (one classification result
+     *              per time step) into a human-readable string.
+     * @param[in]   vecResults   Vector of classification results from the classifier.
+     * @return      Decoded output string.
+    **/
+    std::string DecodeOutput(const std::vector<ClassificationResult>& vecResults);
+
+} /* namespace asr */
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_ASR_OUTPUT_DECODE_HPP */
\ No newline at end of file
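A hedged usage sketch tying the classifier and decoder together; it assumes AsrClassifier is default-constructible and that the output tensor and labels are prepared elsewhere in the application:

    #include "AsrClassifier.hpp"
    #include "OutputDecode.hpp"
    #include "TensorFlowLiteMicro.hpp"

    #include <string>
    #include <vector>

    std::string ClassifyAndDecode(TfLiteTensor* outputTensor,
                                  const std::vector<std::string>& labels)
    {
        arm::app::AsrClassifier classifier;   /* Assumed default-constructible. */
        std::vector<arm::app::ClassificationResult> results;

        /* Top-1 per output time step; this classifier ignores other topNCount values. */
        if (!classifier.GetClassificationResults(outputTensor, results, labels, 1)) {
            return std::string{};
        }

        /* Collapse the per-step labels into a readable transcription. */
        return arm::app::audio::asr::DecodeOutput(results);
    }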
diff --git a/source/use_case/kws_asr/include/UseCaseHandler.hpp b/source/use_case/kws_asr/include/UseCaseHandler.hpp
new file mode 100644
index 0000000..1c60662
--- /dev/null
+++ b/source/use_case/kws_asr/include/UseCaseHandler.hpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_ASR_EVT_HANDLER_HPP
+#define KWS_ASR_EVT_HANDLER_HPP
+
+#include "AppContext.hpp"
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief       Handles the inference event.
+     * @param[in]   ctx         Reference to the application context.
+     * @param[in]   clipIndex   Index to the audio clip to classify.
+     * @param[in]   runAll      Flag to request classification of all the available audio clips.
+     * @return      true or false based on execution success.
+     **/
+    bool ClassifyAudioHandler(ApplicationContext& ctx, uint32_t clipIndex, bool runAll);
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_ASR_EVT_HANDLER_HPP */
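A minimal invocation sketch, assuming the application context has already been populated (model, classifiers, labels and the "clipIndex" entry) by the main loop:

    #include "UseCaseHandler.hpp"

    #include <cstdint>

    bool ClassifyCurrentClip(arm::app::ApplicationContext& ctx)
    {
        /* Classify only the clip currently selected in the context,
         * without looping over all available files. */
        const auto clipIdx = ctx.Get<uint32_t>("clipIndex");
        return arm::app::ClassifyAudioHandler(ctx, clipIdx, false);
    }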
diff --git a/source/use_case/kws_asr/include/Wav2LetterMfcc.hpp b/source/use_case/kws_asr/include/Wav2LetterMfcc.hpp
new file mode 100644
index 0000000..0852cbf
--- /dev/null
+++ b/source/use_case/kws_asr/include/Wav2LetterMfcc.hpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_ASR_WAV2LET_MFCC_HPP
+#define KWS_ASR_WAV2LET_MFCC_HPP
+
+#include "Mfcc.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    /* Class to provide Wav2Letter specific MFCC calculation requirements. */
+    class Wav2LetterMFCC : public MFCC {
+
+    public:
+        static constexpr uint32_t  ms_defaultSamplingFreq = 16000;
+        static constexpr uint32_t  ms_defaultNumFbankBins =   128;
+        static constexpr uint32_t  ms_defaultMelLoFreq    =     0;
+        static constexpr uint32_t  ms_defaultMelHiFreq    =  8000;
+        static constexpr bool      ms_defaultUseHtkMethod = false;
+
+        explicit Wav2LetterMFCC(const size_t numFeats, const size_t frameLen)
+            :  MFCC(MfccParams(
+                        ms_defaultSamplingFreq, ms_defaultNumFbankBins,
+                        ms_defaultMelLoFreq, ms_defaultMelHiFreq,
+                        numFeats, frameLen, ms_defaultUseHtkMethod))
+        {}
+
+        Wav2LetterMFCC()  = delete;
+        ~Wav2LetterMFCC() = default;
+
+    protected:
+
+        /**
+         * @brief       Overrides base class implementation of this function.
+         * @param[in]   fftVec                  Vector populated with FFT magnitudes.
+         * @param[in]   melFilterBank           2D Vector with filter bank weights.
+         * @param[in]   filterBankFilterFirst   Vector containing the first indices of filter bank
+         *                                      to be used for each bin.
+         * @param[in]   filterBankFilterLast    Vector containing the last indices of filter bank
+         *                                      to be used for each bin.
+         * @param[out]  melEnergies             Pre-allocated vector of MEL energies to be
+         *                                      populated.
+         * @return      true if successful, false otherwise.
+         */
+        bool ApplyMelFilterBank(
+                std::vector<float>&                 fftVec,
+                std::vector<std::vector<float>>&    melFilterBank,
+                std::vector<int32_t>&               filterBankFilterFirst,
+                std::vector<int32_t>&               filterBankFilterLast,
+                std::vector<float>&                 melEnergies) override;
+
+        /**
+         * @brief           Override for the base class implementation that
+         *                  converts mel energies to a logarithmic scale. The
+         *                  difference from the default behaviour is that the
+         *                  power is converted to dB and subsequently clamped.
+         * @param[in,out]   melEnergies   1D vector of Mel energies.
+         **/
+        void ConvertToLogarithmicScale(
+                std::vector<float>& melEnergies) override;
+
+        /**
+         * @brief       Create a matrix used to calculate Discrete Cosine
+         *              Transform. Override for the base class' default
+         *              implementation as the first and last elements
+         *              use a different normaliser.
+         * @param[in]   inputLength        Input length of the buffer on which
+         *                                 DCT will be performed.
+         * @param[in]   coefficientCount   Total coefficients per input length.
+         * @return      1D vector with inputLength x coefficientCount elements
+         *              populated with DCT coefficients.
+         */
+        std::vector<float> CreateDCTMatrix(
+                int32_t inputLength,
+                int32_t coefficientCount) override;
+
+        /**
+         * @brief       Given the low and high Mel values, get the normaliser
+         *              for weights to be applied when populating the filter
+         *              bank. Override for the base class implementation.
+         * @param[in]   leftMel        Low Mel frequency value.
+         * @param[in]   rightMel       High Mel frequency value.
+         * @param[in]   useHTKMethod   Bool to signal if HTK method is to be
+         *                             used for calculation.
+         * @return      Value to use for normalising.
+         */
+        float GetMelFilterBankNormaliser(
+                const float&   leftMel,
+                const float&   rightMel,
+                bool     useHTKMethod) override;
+    };
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_ASR_WAV2LET_MFCC_HPP */
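For reference, the HTK-style mel mapping selected by ms_defaultUseHtkMethod and the conventional orthonormal DCT-II used to derive MFCCs from log mel energies are shown below; the modified normalisers that CreateDCTMatrix applies to the first and last elements are defined in the implementation and not reproduced here.

\[
m_{\text{HTK}}(f) = 2595 \,\log_{10}\!\left(1 + \frac{f}{700\,\text{Hz}}\right)
\]

\[
c_k = \sqrt{\frac{2}{N}} \sum_{n=0}^{N-1} E_n \cos\!\left[\frac{\pi k}{N}\left(n + \frac{1}{2}\right)\right],
\qquad k = 0, \ldots, K-1
\]

where \(E_n\) are the log mel energies, \(N\) is the number of filter bank bins (ms_defaultNumFbankBins) and \(K\) is the number of MFCC coefficients requested.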
diff --git a/source/use_case/kws_asr/include/Wav2LetterModel.hpp b/source/use_case/kws_asr/include/Wav2LetterModel.hpp
new file mode 100644
index 0000000..fb701ea
--- /dev/null
+++ b/source/use_case/kws_asr/include/Wav2LetterModel.hpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_ASR_WAV2LETTER_MODEL_HPP
+#define KWS_ASR_WAV2LETTER_MODEL_HPP
+
+#include "Model.hpp"
+
+namespace arm {
+namespace app {
+namespace asr {
+    extern const int g_FrameLength;
+    extern const int g_FrameStride;
+    extern const float g_ScoreThreshold;
+    extern const int g_ctxLen;
+} /* namespace asr */
+} /* namespace app */
+} /* namespace arm */
+
+namespace arm {
+namespace app {
+
+    class Wav2LetterModel : public Model {
+
+    public:
+        /* Indices for the expected model - based on input and output tensor shapes */
+        static constexpr uint32_t ms_inputRowsIdx = 1;
+        static constexpr uint32_t ms_inputColsIdx = 2;
+        static constexpr uint32_t ms_outputRowsIdx = 2;
+        static constexpr uint32_t ms_outputColsIdx = 3;
+
+    protected:
+        /** @brief   Gets the reference to op resolver interface class. */
+        const tflite::MicroOpResolver& GetOpResolver() override;
+
+        /** @brief   Adds operations to the op resolver instance. */
+        bool EnlistOperations() override;
+
+        const uint8_t* ModelPointer() override;
+
+        size_t ModelSize() override;
+
+    private:
+        /* Maximum number of individual operations that can be enlisted. */
+        static constexpr int _ms_maxOpCnt = 5;
+
+        /* A mutable op resolver instance. */
+        tflite::MicroMutableOpResolver<_ms_maxOpCnt> _m_opResolver;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_ASR_WAV2LETTER_MODEL_HPP */
diff --git a/source/use_case/kws_asr/include/Wav2LetterPostprocess.hpp b/source/use_case/kws_asr/include/Wav2LetterPostprocess.hpp
new file mode 100644
index 0000000..3a9d401
--- /dev/null
+++ b/source/use_case/kws_asr/include/Wav2LetterPostprocess.hpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_ASR_WAV2LET_POSTPROC_HPP
+#define KWS_ASR_WAV2LET_POSTPROC_HPP
+
+#include "TensorFlowLiteMicro.hpp" /* TensorFlow headers */
+#include "hal.h"    /* stdout facility */
+
+namespace arm {
+namespace app {
+namespace audio {
+namespace asr {
+
+    /**
+     * @brief   Helper class to manage tensor post-processing for "wav2letter"
+     *          output.
+     */
+    class Postprocess {
+    public:
+        /**
+         * @brief       Constructor
+         * @param[in]   contextLen      Left and right context length for
+         *                              the output tensor.
+         * @param[in]   innerLen        Length of the section between the
+         *                              left and right contexts.
+         * @param[in]   blankTokenIdx   Index of the blank token in the labels.
+         **/
+        Postprocess(uint32_t contextLen,
+                    uint32_t innerLen,
+                    uint32_t blankTokenIdx);
+
+        Postprocess() = delete;
+        ~Postprocess() = default;
+
+        /**
+         * @brief       Erases the required part of the tensor based
+         *              on context lengths set up during initialisation
+         * @param[in]   tensor          Pointer to the tensor
+         * @param[in]   axisIdx         Index of the axis on which erase is
+         *                              performed.
+         * @param[in]   lastIteration   Flag to signal if this is the
+         *                              last iteration, in which case
+         *                              the right context is preserved.
+         * @return      true if successful, false otherwise.
+         */
+        bool Invoke(TfLiteTensor*  tensor,
+                    uint32_t axisIdx,
+                    bool lastIteration = false);
+
+    private:
+        uint32_t    _m_contextLen;      /* Lengths of left and right contexts. */
+        uint32_t    _m_innerLen;        /* Length of inner context. */
+        uint32_t    _m_totalLen;        /* Total length of the required axis. */
+        uint32_t    _m_countIterations; /* Current number of iterations. */
+        uint32_t    _m_blankTokenIdx;   /* Index of the labels blank token. */
+        /**
+         * @brief       Checks if the tensor and axis index are valid
+         *              inputs to the object - based on how it has been
+         *              initialised.
+         * @return      true if valid, false otherwise.
+         */
+        bool _IsInputValid(TfLiteTensor*  tensor,
+                           uint32_t axisIdx) const;
+
+        /**
+         * @brief       Gets the tensor data element size in bytes based
+         *              on the tensor type.
+         * @return      Size in bytes, 0 if not supported.
+         */
+        uint32_t _GetTensorElementSize(TfLiteTensor* tensor);
+
+        /**
+         * @brief       Erases sections from the data assuming row-wise
+         *              arrangement along the context axis.
+         * @return      true if successful, false otherwise.
+         */
+        bool _EraseSectionsRowWise(uint8_t* ptrData,
+                                   uint32_t strideSzBytes,
+                                   bool lastIteration);
+
+    };
+
+} /* namespace asr */
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_ASR_WAV2LET_POSTPROC_HPP */
\ No newline at end of file
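A hedged reading of the fields above: the output axis handled by Invoke is treated as a left context, an inner section and a right context, so that

\[
\text{totalLen} = \text{contextLen} + \text{innerLen} + \text{contextLen}
\]

The context sections overlap with neighbouring inference windows and are erased as required between iterations; on the final iteration the right context is preserved, as the Invoke documentation notes.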
diff --git a/source/use_case/kws_asr/include/Wav2LetterPreprocess.hpp b/source/use_case/kws_asr/include/Wav2LetterPreprocess.hpp
new file mode 100644
index 0000000..3ffabb4
--- /dev/null
+++ b/source/use_case/kws_asr/include/Wav2LetterPreprocess.hpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_ASR_WAV2LET_PREPROC_HPP
+#define KWS_ASR_WAV2LET_PREPROC_HPP
+
+#include "Wav2LetterModel.hpp"
+#include "Wav2LetterMfcc.hpp"
+#include "AudioUtils.hpp"
+#include "DataStructures.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+namespace asr {
+
+    /* Class to facilitate pre-processing calculation for Wav2Letter model
+     * for ASR. */
+    using AudioWindow = SlidingWindow <const int16_t>;
+
+    class Preprocess {
+    public:
+        /**
+         * @brief       Constructor
+         * @param[in]   numMfccFeatures   Number of MFCC features per window.
+         * @param[in]   windowLen         Number of elements in a window.
+         * @param[in]   windowStride      Stride (in number of elements) for
+         *                                moving the window.
+         * @param[in]   numMfccVectors    Number of MFCC feature vectors to be computed (one per window).
+        */
+        Preprocess(
+            uint32_t  numMfccFeatures,
+            uint32_t  windowLen,
+            uint32_t  windowStride,
+            uint32_t  numMfccVectors);
+        Preprocess() = delete;
+        ~Preprocess() = default;
+
+        /**
+         * @brief       Calculates the features required from audio data. This
+         *              includes MFCC, first and second order deltas,
+         *              normalisation and, finally, quantisation. The tensor is
+         *              populated with the features from a given window, placed
+         *              along a single row.
+         * @param[in]   audioData      Pointer to the first element of audio data.
+         * @param[in]   audioDataLen   Number of elements in the audio data.
+         * @param[in]   tensor         Tensor to be populated.
+         * @return      true if successful, false in case of error.
+         */
+        bool Invoke(const int16_t * audioData,
+                    uint32_t  audioDataLen,
+                    TfLiteTensor *  tensor);
+
+    protected:
+         /**
+          * @brief Computes the first and second order deltas for the
+          *        MFCC buffers - they are assumed to be populated.
+          *
+          * @param[in]  mfcc     MFCC buffers.
+          * @param[out] delta1   Result of the first diff computation.
+          * @param[out] delta2   Result of the second diff computation.
+          *
+          * @return true if successful, false otherwise.
+          */
+         static bool _ComputeDeltas(Array2d<float>& mfcc,
+                                    Array2d<float>& delta1,
+                                    Array2d<float>& delta2);
+
+        /**
+         * @brief       Given a 2D vector of floats, computes the mean.
+         * @param[in]   vec   Vector of vector of floats.
+         * @return      Mean value.
+         */
+        static float _GetMean(Array2d<float>& vec);
+
+        /**
+         * @brief       Given a 2D vector of floats, computes the stddev.
+         * @param[in]   vec    Vector of vector of floats.
+         * @param[in]   mean   Mean value of the vector passed in.
+         * @return      stddev value.
+         */
+        static float _GetStdDev(Array2d<float>& vec,
+                                float mean);
+
+        /**
+         * @brief           Given a 2D vector of floats, normalises it using
+         *                  the mean and the stddev
+         * @param[in,out]   vec   Vector of vector of floats.
+         */
+        static void _NormaliseVec(Array2d<float>& vec);
+
+        /**
+         * @brief       Normalises the MFCC and delta buffers.
+         */
+        void _Normalise();
+
+        /**
+         * @brief       Given the quantisation and data type limits, computes
+         *              the quantised values of a floating point input data.
+         * @param[in]   elem            Element to be quantised.
+         * @param[in]   quantScale      Scale.
+         * @param[in]   quantOffset     Offset.
+         * @param[in]   minVal          Numerical limit - minimum.
+         * @param[in]   maxVal          Numerical limit - maximum.
+         * @return      Floating point quantised value.
+         */
+        static float _GetQuantElem(
+                float     elem,
+                float     quantScale,
+                int       quantOffset,
+                float     minVal,
+                float     maxVal);
+
+        /**
+         * @brief       Quantises the MFCC and delta buffers, and places them
+         *              in the output buffer. While doing so, it transposes
+         *              the data. The buffers in this class keep the "time"
+         *              axis row major, primarily to speed up convolution by
+         *              operating on contiguous memory. The output, however,
+         *              requires the time axis in a column-major arrangement.
+         * @param[in]   outputBuf       Pointer to the output buffer.
+         * @param[in]   outputBufSz     Output buffer's size.
+         * @param[in]   quantScale      Quantisation scale.
+         * @param[in]   quantOffset     Quantisation offset.
+         */
+        template <typename T>
+        bool _Quantise(
+                T *             outputBuf,
+                const uint32_t  outputBufSz,
+                const float     quantScale,
+                const int       quantOffset)
+        {
+            /* Check that the output buffer is large enough for all the features. */
+            if (outputBufSz < (this->_m_mfccBuf.size(0) * 3 * sizeof(T))) {
+                printf_err("Tensor size too small for features\n");
+                return false;
+            }
+
+            /* Populate. */
+            T * outputBufMfcc = outputBuf;
+            T * outputBufD1 = outputBuf + this->_m_numMfccFeats;
+            T * outputBufD2 = outputBufD1 + this->_m_numMfccFeats;
+            const uint32_t ptrIncr = this->_m_numMfccFeats * 2;  /* (3 vectors - 1 vector) */
+
+            const float minVal = std::numeric_limits<T>::min();
+            const float maxVal = std::numeric_limits<T>::max();
+
+            /* We need to do a transpose while copying and concatenating
+             * the tensor. */
+            for (uint32_t j = 0; j < this->_m_numFeatVectors; ++j) {
+                for (uint32_t i = 0; i < this->_m_numMfccFeats; ++i) {
+                    *outputBufMfcc++ = static_cast<T>(this->_GetQuantElem(
+                                        this->_m_mfccBuf(i, j), quantScale,
+                                        quantOffset, minVal, maxVal));
+                    *outputBufD1++ = static_cast<T>(this->_GetQuantElem(
+                                        this->_m_delta1Buf(i, j), quantScale,
+                                        quantOffset, minVal, maxVal));
+                    *outputBufD2++ = static_cast<T>(this->_GetQuantElem(
+                                        this->_m_delta2Buf(i, j), quantScale,
+                                        quantOffset, minVal, maxVal));
+                }
+                outputBufMfcc += ptrIncr;
+                outputBufD1 += ptrIncr;
+                outputBufD2 += ptrIncr;
+            }
+
+            return true;
+        }
+
+    private:
+        Wav2LetterMFCC      _m_mfcc;            /* MFCC instance. */
+
+        /* Actual buffers to be populated. */
+        Array2d<float>      _m_mfccBuf;         /* Contiguous buffer 1D: MFCC */
+        Array2d<float>      _m_delta1Buf;       /* Contiguous buffer 1D: Delta 1 */
+        Array2d<float>      _m_delta2Buf;       /* Contiguous buffer 1D: Delta 2 */
+
+        uint32_t            _m_windowLen;       /* Window length for MFCC. */
+        uint32_t            _m_windowStride;    /* Window stride len for MFCC. */
+        uint32_t            _m_numMfccFeats;    /* Number of MFCC features per window. */
+        uint32_t            _m_numFeatVectors;  /* Number of MFCC feature vectors. */
+        AudioWindow         _m_window;          /* Sliding window. */
+
+    };
+
+} /* namespace asr */
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_ASR_WAV2LET_PREPROC_HPP */
\ No newline at end of file
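In summary, Invoke applies the following numerical steps. The delta formula shown is the conventional regression form (the exact window length used by _ComputeDeltas is implementation-defined); the normalisation uses the mean and standard deviation from _GetMean/_GetStdDev; and the quantisation is the usual affine mapping with clamping, matching the parameters of _GetQuantElem (the exact rounding mode is implementation-defined):

\[
\Delta c_t = \frac{\sum_{n=1}^{N} n \,(c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{N} n^2}
\qquad \text{(applied once for delta1, and again on delta1 for delta2)}
\]

\[
\hat{x} = \frac{x - \mu}{\sigma}
\qquad \text{(z-score normalisation of the MFCC and delta buffers)}
\]

\[
q = \operatorname{clamp}\!\left(\operatorname{round}\!\left(\frac{x}{s}\right) + z,\ \min_T,\ \max_T\right)
\qquad \text{(affine quantisation with scale } s \text{ and offset } z\text{)}
\]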
diff --git a/source/use_case/kws_asr/src/AsrClassifier.cc b/source/use_case/kws_asr/src/AsrClassifier.cc
new file mode 100644
index 0000000..bc86e09
--- /dev/null
+++ b/source/use_case/kws_asr/src/AsrClassifier.cc
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AsrClassifier.hpp"
+
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "Wav2LetterModel.hpp"
+
+template<typename T>
+bool arm::app::AsrClassifier::_GetTopResults(TfLiteTensor* tensor,
+                            std::vector<ClassificationResult>& vecResults,
+                            const std::vector <std::string>& labels, double scale, double zeroPoint)
+{
+    const uint32_t nElems = tensor->dims->data[arm::app::Wav2LetterModel::ms_outputRowsIdx];
+    const uint32_t nLetters = tensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx];
+
+
+    /* NOTE: verifying the tensor's size against the labels is the
+     *       responsibility of the calling/public function. */
+    if (nLetters < 1) {
+        return false;
+    }
+
+    /* Final results' container. */
+    vecResults = std::vector<ClassificationResult>(nElems);
+
+    T* tensorData = tflite::GetTensorData<T>(tensor);
+
+    /* Get the top 1 results. */
+    for (uint32_t i = 0, row = 0; i < nElems; ++i, row+=nLetters) {
+        std::pair<T, uint32_t> top_1 = std::make_pair(tensorData[row + 0], 0);
+
+        for (uint32_t j = 1; j < nLetters; ++j) {
+            if (top_1.first < tensorData[row + j]) {
+                top_1.first = tensorData[row + j];
+                top_1.second = j;
+            }
+        }
+
+        double score = static_cast<int> (top_1.first);
+        vecResults[i].m_normalisedVal = scale * (score - zeroPoint);
+        vecResults[i].m_label = labels[top_1.second];
+        vecResults[i].m_labelIdx = top_1.second;
+    }
+
+    return true;
+}
+template bool arm::app::AsrClassifier::_GetTopResults<uint8_t>(TfLiteTensor* tensor,
+                            std::vector<ClassificationResult>& vecResults,
+                            const std::vector <std::string>& labels, double scale, double zeroPoint);
+template bool arm::app::AsrClassifier::_GetTopResults<int8_t>(TfLiteTensor* tensor,
+                            std::vector<ClassificationResult>& vecResults,
+                            const std::vector <std::string>& labels, double scale, double zeroPoint);
+
+bool arm::app::AsrClassifier::GetClassificationResults(
+            TfLiteTensor* outputTensor,
+            std::vector<ClassificationResult>& vecResults,
+            const std::vector <std::string>& labels, uint32_t topNCount)
+{
+        vecResults.clear();
+
+        constexpr int minTensorDims = static_cast<int>(
+            (arm::app::Wav2LetterModel::ms_outputRowsIdx > arm::app::Wav2LetterModel::ms_outputColsIdx)?
+             arm::app::Wav2LetterModel::ms_outputRowsIdx : arm::app::Wav2LetterModel::ms_outputColsIdx);
+
+        constexpr uint32_t outColsIdx = arm::app::Wav2LetterModel::ms_outputColsIdx;
+
+        /* Sanity checks. */
+        if (outputTensor == nullptr) {
+            printf_err("Output vector is null pointer.\n");
+            return false;
+        } else if (outputTensor->dims->size < minTensorDims) {
+            printf_err("Output tensor expected to be 3D (1, m, n)\n");
+            return false;
+        } else if (static_cast<uint32_t>(outputTensor->dims->data[outColsIdx]) < topNCount) {
+            printf_err("Output vectors are smaller than %u\n", topNCount);
+            return false;
+        } else if (static_cast<uint32_t>(outputTensor->dims->data[outColsIdx]) != labels.size()) {
+            printf("Output size doesn't match the labels' size\n");
+            return false;
+        }
+
+        if (topNCount != 1) {
+            warn("TopNCount value ignored in this implementation\n");
+        }
+
+        /* To return the floating point values, we need quantization parameters. */
+        QuantParams quantParams = GetTensorQuantParams(outputTensor);
+
+        bool resultState;
+
+        switch (outputTensor->type) {
+            case kTfLiteUInt8:
+                resultState = this->_GetTopResults<uint8_t>(
+                                        outputTensor, vecResults,
+                                        labels, quantParams.scale,
+                                        quantParams.offset);
+                break;
+            case kTfLiteInt8:
+                resultState = this->_GetTopResults<int8_t>(
+                                        outputTensor, vecResults,
+                                        labels, quantParams.scale,
+                                        quantParams.offset);
+                break;
+            default:
+                printf_err("Tensor type %s not supported by classifier\n",
+                    TfLiteTypeGetName(outputTensor->type));
+                return false;
+        }
+
+        if (!resultState) {
+            printf_err("Failed to get sorted set\n");
+            return false;
+        }
+
+        return true;
+}
\ No newline at end of file
diff --git a/source/use_case/kws_asr/src/DsCnnModel.cc b/source/use_case/kws_asr/src/DsCnnModel.cc
new file mode 100644
index 0000000..b573a12
--- /dev/null
+++ b/source/use_case/kws_asr/src/DsCnnModel.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnModel.hpp"
+
+#include "hal.h"
+
+namespace arm {
+namespace app {
+namespace kws {
+    extern uint8_t* GetModelPointer();
+    extern size_t GetModelLen();
+} /* namespace kws */
+} /* namespace app */
+} /* namespace arm */
+
+const tflite::MicroOpResolver& arm::app::DsCnnModel::GetOpResolver()
+{
+    return this->_m_opResolver;
+}
+
+bool arm::app::DsCnnModel::EnlistOperations()
+{
+    this->_m_opResolver.AddAveragePool2D();
+    this->_m_opResolver.AddConv2D();
+    this->_m_opResolver.AddDepthwiseConv2D();
+    this->_m_opResolver.AddFullyConnected();
+    this->_m_opResolver.AddRelu();
+    this->_m_opResolver.AddSoftmax();
+    this->_m_opResolver.AddQuantize();
+    this->_m_opResolver.AddDequantize();
+    this->_m_opResolver.AddReshape();
+
+#if defined(ARM_NPU)
+    if (kTfLiteOk == this->_m_opResolver.AddEthosU()) {
+        info("Added %s support to op resolver\n",
+            tflite::GetString_ETHOSU());
+    } else {
+        printf_err("Failed to add Arm NPU support to op resolver.");
+        return false;
+    }
+#endif /* ARM_NPU */
+    return true;
+}
+
+const uint8_t* arm::app::DsCnnModel::ModelPointer()
+{
+    return arm::app::kws::GetModelPointer();
+}
+
+size_t arm::app::DsCnnModel::ModelSize()
+{
+    return arm::app::kws::GetModelLen();
+}
\ No newline at end of file
diff --git a/source/use_case/kws_asr/src/MainLoop.cc b/source/use_case/kws_asr/src/MainLoop.cc
new file mode 100644
index 0000000..37146c9
--- /dev/null
+++ b/source/use_case/kws_asr/src/MainLoop.cc
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"                    /* Brings in platform definitions. */
+#include "InputFiles.hpp"           /* For input images. */
+#include "Labels_dscnn.hpp"         /* For DS-CNN label strings. */
+#include "Labels_wav2letter.hpp"    /* For Wav2Letter label strings. */
+#include "Classifier.hpp"           /* KWS classifier. */
+#include "AsrClassifier.hpp"        /* ASR classifier. */
+#include "DsCnnModel.hpp"           /* KWS model class for running inference. */
+#include "Wav2LetterModel.hpp"      /* ASR model class for running inference. */
+#include "UseCaseCommonUtils.hpp"   /* Utils functions. */
+#include "UseCaseHandler.hpp"       /* Handlers for different user options. */
+#include "Wav2LetterPreprocess.hpp" /* ASR pre-processing class. */
+#include "Wav2LetterPostprocess.hpp"/* ASR post-processing class. */
+
+using KwsClassifier = arm::app::Classifier;
+
+enum opcodes
+{
+    MENU_OPT_RUN_INF_NEXT = 1,       /* Run on next vector. */
+    MENU_OPT_RUN_INF_CHOSEN,         /* Run on a user provided vector index. */
+    MENU_OPT_RUN_INF_ALL,            /* Run inference on all. */
+    MENU_OPT_SHOW_MODEL_INFO,        /* Show model info. */
+    MENU_OPT_LIST_AUDIO_CLIPS        /* List the current baked audio clips. */
+};
+
+static void DisplayMenu()
+{
+    printf("\n\nUser input required\n");
+    printf("Enter option number from:\n\n");
+    printf("  %u. Classify next audio clip\n", MENU_OPT_RUN_INF_NEXT);
+    printf("  %u. Classify audio clip at chosen index\n", MENU_OPT_RUN_INF_CHOSEN);
+    printf("  %u. Run classification on all audio clips\n", MENU_OPT_RUN_INF_ALL);
+    printf("  %u. Show NN model info\n", MENU_OPT_SHOW_MODEL_INFO);
+    printf("  %u. List audio clips\n\n", MENU_OPT_LIST_AUDIO_CLIPS);
+    printf("  Choice: ");
+}
+
+/** @brief Gets the number of MFCC features for a single window. */
+static uint32_t GetNumMfccFeatures(const arm::app::Model& model);
+
+/** @brief Gets the number of MFCC feature vectors to be computed. */
+static uint32_t GetNumMfccFeatureVectors(const arm::app::Model& model);
+
+/** @brief Gets the output context length (left and right) for post-processing. */
+static uint32_t GetOutputContextLen(const arm::app::Model& model,
+                                    uint32_t inputCtxLen);
+
+/** @brief Gets the output inner length for post-processing. */
+static uint32_t GetOutputInnerLen(const arm::app::Model& model,
+                                  uint32_t outputCtxLen);
+
+void main_loop(hal_platform& platform)
+{
+    /* Model wrapper objects. */
+    arm::app::DsCnnModel kwsModel;
+    arm::app::Wav2LetterModel asrModel;
+
+    /* Load the models. */
+    if (!kwsModel.Init()) {
+        printf_err("Failed to initialise KWS model\n");
+        return;
+    }
+
+    /* Initialise the asr model using the same allocator from KWS
+     * to re-use the tensor arena. */
+    if (!asrModel.Init(kwsModel.GetAllocator())) {
+        printf_err("Failed to initalise ASR model\n");
+        return;
+    }
+
+    /* Initialise ASR pre-processing. */
+    arm::app::audio::asr::Preprocess prep(
+            GetNumMfccFeatures(asrModel),
+            arm::app::asr::g_FrameLength,
+            arm::app::asr::g_FrameStride,
+            GetNumMfccFeatureVectors(asrModel));
+
+    /* Initialise ASR post-processing. */
+    const uint32_t outputCtxLen = GetOutputContextLen(asrModel, arm::app::asr::g_ctxLen);
+    const uint32_t blankTokenIdx = 28;
+    arm::app::audio::asr::Postprocess postp(
+            outputCtxLen,
+            GetOutputInnerLen(asrModel, outputCtxLen),
+            blankTokenIdx);
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("kwsmodel", kwsModel);
+    caseContext.Set<arm::app::Model&>("asrmodel", asrModel);
+    caseContext.Set<uint32_t>("clipIndex", 0);
+    caseContext.Set<uint32_t>("ctxLen", arm::app::asr::g_ctxLen);  /* Left and right context length (MFCC feat vectors). */
+    caseContext.Set<int>("kwsframeLength", arm::app::kws::g_FrameLength);
+    caseContext.Set<int>("kwsframeStride", arm::app::kws::g_FrameStride);
+    caseContext.Set<float>("kwsscoreThreshold", arm::app::kws::g_ScoreThreshold);  /* Normalised score threshold. */
+    caseContext.Set<uint32_t >("kwsNumMfcc", arm::app::kws::g_NumMfcc);
+    caseContext.Set<uint32_t >("kwsNumAudioWins", arm::app::kws::g_NumAudioWins);
+
+    caseContext.Set<int>("asrframeLength", arm::app::asr::g_FrameLength);
+    caseContext.Set<int>("asrframeStride", arm::app::asr::g_FrameStride);
+    caseContext.Set<float>("asrscoreThreshold", arm::app::asr::g_ScoreThreshold);  /* Normalised score threshold. */
+
+    KwsClassifier kwsClassifier;  /* Classifier wrapper object. */
+    arm::app::AsrClassifier asrClassifier;  /* Classifier wrapper object. */
+    caseContext.Set<arm::app::Classifier&>("kwsclassifier", kwsClassifier);
+    caseContext.Set<arm::app::AsrClassifier&>("asrclassifier", asrClassifier);
+
+    caseContext.Set<arm::app::audio::asr::Preprocess&>("preprocess", prep);
+    caseContext.Set<arm::app::audio::asr::Postprocess&>("postprocess", postp);
+
+    std::vector<std::string> asrLabels;
+    arm::app::asr::GetLabelsVector(asrLabels);
+    std::vector<std::string> kwsLabels;
+    arm::app::kws::GetLabelsVector(kwsLabels);
+    caseContext.Set<const std::vector <std::string>&>("asrlabels", asrLabels);
+    caseContext.Set<const std::vector <std::string>&>("kwslabels", kwsLabels);
+
+    /* Index of the kws outputs we trigger ASR on. */
+    caseContext.Set<uint32_t>("keywordindex", 2);
+
+    bool executionSuccessful = true;
+    constexpr bool bUseMenu = NUMBER_OF_FILES > 1;
+
+    /* Loop. */
+    do {
+        int menuOption = MENU_OPT_RUN_INF_NEXT;
+        if (bUseMenu) {
+            DisplayMenu();
+            menuOption = arm::app::ReadUserInputAsInt(platform);
+            printf("\n");
+        }
+        switch (menuOption) {
+            case MENU_OPT_RUN_INF_NEXT:
+                executionSuccessful = ClassifyAudioHandler(
+                        caseContext,
+                        caseContext.Get<uint32_t>("clipIndex"),
+                        false);
+                break;
+            case MENU_OPT_RUN_INF_CHOSEN: {
+                printf("    Enter the audio clip index [0, %d]: ",
+                       NUMBER_OF_FILES-1);
+                auto clipIndex = static_cast<uint32_t>(
+                        arm::app::ReadUserInputAsInt(platform));
+                executionSuccessful = ClassifyAudioHandler(caseContext,
+                                                           clipIndex,
+                                                           false);
+                break;
+            }
+            case MENU_OPT_RUN_INF_ALL:
+                executionSuccessful = ClassifyAudioHandler(
+                        caseContext,
+                        caseContext.Get<uint32_t>("clipIndex"),
+                        true);
+                break;
+            case MENU_OPT_SHOW_MODEL_INFO:
+                executionSuccessful = kwsModel.ShowModelInfoHandler();
+                executionSuccessful = asrModel.ShowModelInfoHandler() && executionSuccessful;
+                break;
+            case MENU_OPT_LIST_AUDIO_CLIPS:
+                executionSuccessful = ListFilesHandler(caseContext);
+                break;
+            default:
+                printf("Incorrect choice, try again.");
+                break;
+        }
+    } while (executionSuccessful && bUseMenu);
+    info("Main loop terminated.\n");
+}
+
+static uint32_t GetNumMfccFeatures(const arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    const int inputCols = inputTensor->dims->data[arm::app::Wav2LetterModel::ms_inputColsIdx];
+    if (0 != inputCols % 3) {
+        printf_err("Number of input columns is not a multiple of 3\n");
+    }
+    return std::max(inputCols/3, 0);
+}
+
+static uint32_t GetNumMfccFeatureVectors(const arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    const int inputRows = inputTensor->dims->data[arm::app::Wav2LetterModel::ms_inputRowsIdx];
+    return std::max(inputRows, 0);
+}
+
+static uint32_t GetOutputContextLen(const arm::app::Model& model, const uint32_t inputCtxLen)
+{
+    const uint32_t inputRows = GetNumMfccFeatureVectors(model);
+    const uint32_t inputInnerLen = inputRows - (2 * inputCtxLen);
+    constexpr uint32_t ms_outputRowsIdx = arm::app::Wav2LetterModel::ms_outputRowsIdx;
+
+    /* Check to make sure that the input tensor supports the above context and inner lengths. */
+    if (inputRows <= 2 * inputCtxLen || inputRows <= inputInnerLen) {
+        printf_err("Input rows not compatible with ctx of %u\n",
+                   inputCtxLen);
+        return 0;
+    }
+
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+    const uint32_t outputRows = std::max(outputTensor->dims->data[ms_outputRowsIdx], 0);
+
+    const float tensorColRatio = static_cast<float>(inputRows)/
+                                 static_cast<float>(outputRows);
+
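+    /* The network reduces the time axis, so scale the input context length by the
+     * input-to-output row ratio to get the corresponding number of output rows. */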
+    return std::round(static_cast<float>(inputCtxLen)/tensorColRatio);
+}
+
+static uint32_t GetOutputInnerLen(const arm::app::Model& model,
+                                  const uint32_t outputCtxLen)
+{
+    constexpr uint32_t ms_outputRowsIdx = arm::app::Wav2LetterModel::ms_outputRowsIdx;
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+    const uint32_t outputRows = std::max(outputTensor->dims->data[ms_outputRowsIdx], 0);
+    return (outputRows - (2 * outputCtxLen));
+}
diff --git a/source/use_case/kws_asr/src/OutputDecode.cc b/source/use_case/kws_asr/src/OutputDecode.cc
new file mode 100644
index 0000000..41fbe07
--- /dev/null
+++ b/source/use_case/kws_asr/src/OutputDecode.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "OutputDecode.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+namespace asr {
+
+    std::string DecodeOutput(const std::vector<ClassificationResult>& vecResults)
+    {
+        std::string CleanOutputBuffer;
+
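+        /* Example: the label sequence h,h,$,e,e,l,l,$,l,l,o collapses to "hello":
+         * consecutive duplicates are merged first, then the '$' tokens are dropped. */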
+        for (size_t i = 0; i < vecResults.size(); ++i)  /* For all elements in vector. */
+        {
+            while (i+1 < vecResults.size() &&
+                   vecResults[i].m_label == vecResults[i+1].m_label)  /* While the current element is equal to the next, ignore it and move on. */
+            {
+                ++i;
+            }
+            if (vecResults[i].m_label != "$")  /* $ is a character used to represent unknown and double characters so should not be in output. */
+            {
+                CleanOutputBuffer += vecResults[i].m_label;  /* If the element is different to the next, it will be appended to CleanOutputBuffer. */
+            }
+        }
+
+        return CleanOutputBuffer;  /* Return string type containing clean output. */
+    }
+
+} /* namespace asr */
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/kws_asr/src/UseCaseHandler.cc b/source/use_case/kws_asr/src/UseCaseHandler.cc
new file mode 100644
index 0000000..c50796f
--- /dev/null
+++ b/source/use_case/kws_asr/src/UseCaseHandler.cc
@@ -0,0 +1,707 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "UseCaseHandler.hpp"
+
+#include "hal.h"
+#include "InputFiles.hpp"
+#include "AudioUtils.hpp"
+#include "UseCaseCommonUtils.hpp"
+#include "DsCnnModel.hpp"
+#include "DsCnnMfcc.hpp"
+#include "Classifier.hpp"
+#include "KwsResult.hpp"
+#include "Wav2LetterMfcc.hpp"
+#include "Wav2LetterPreprocess.hpp"
+#include "Wav2LetterPostprocess.hpp"
+#include "AsrResult.hpp"
+#include "AsrClassifier.hpp"
+#include "OutputDecode.hpp"
+
+
+using KwsClassifier = arm::app::Classifier;
+
+namespace arm {
+namespace app {
+
+    enum AsrOutputReductionAxis {
+        AxisRow = 1,
+        AxisCol = 2
+    };
+
+    struct KWSOutput {
+        bool executionSuccess = false;
+        const int16_t* asrAudioStart = nullptr;
+        int32_t asrAudioSamples = 0;
+    };
+
+    /**
+     * @brief           Helper function to increment the current audio clip index.
+     * @param[in,out]   ctx     Reference to the application context object.
+     **/
+    static void _IncrementAppCtxClipIdx(ApplicationContext& ctx);
+
+    /**
+     * @brief           Helper function to set the audio clip index.
+     * @param[in,out]   ctx     Reference to the application context object.
+     * @param[in]       idx     Value to be set.
+     * @return          true if index is set, false otherwise.
+     **/
+    static bool _SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx);
+
+    /**
+     * @brief           Presents KWS inference results using the data presentation
+     *                  object.
+     * @param[in]       platform    Reference to the hal platform object.
+     * @param[in]       results     Vector of KWS classification results to be displayed.
+     * @return          true if successful, false otherwise.
+     **/
+    static bool _PresentInferenceResult(hal_platform& platform, std::vector<arm::app::kws::KwsResult>& results);
+
+    /**
+     * @brief           Presents ASR inference results using the data presentation
+     *                  object.
+     * @param[in]       platform    Reference to the hal platform object.
+     * @param[in]       results     Vector of ASR classification results to be displayed.
+     * @return          true if successful, false otherwise.
+     **/
+    static bool _PresentInferenceResult(hal_platform& platform, std::vector<arm::app::asr::AsrResult>& results);
+
+    /**
+     * @brief Returns a function to perform feature calculation and populates input tensor data with
+     * MFCC data.
+     *
+     * Input tensor data type check is performed to choose correct MFCC feature data type.
+     * If tensor has an integer data type then original features are quantised.
+     *
+     * Warning: the MFCC calculator provided as input must outlive the returned function.
+     *
+     * @param[in]           mfcc            MFCC feature calculator.
+     * @param[in,out]       inputTensor     Input tensor pointer to store calculated features.
+     * @param[in]           cacheSize       Size of the feature vectors cache (number of feature vectors).
+     *
+     * @return function     function to be called providing audio sample and sliding window index.
+     **/
+    static std::function<void (std::vector<int16_t>&, int, bool, size_t)>
+    GetFeatureCalculator(audio::DsCnnMFCC&  mfcc,
+                         TfLiteTensor*      inputTensor,
+                         size_t             cacheSize);
+
+    /**
+     * @brief Performs the KWS pipeline.
+     * @param[in,out]   ctx     Reference to the application context object.
+     *
+     * @return KWSOutput    struct containing pointer to audio data where ASR should begin
+     *                      and how much data to process.
+     */
+    static KWSOutput doKws(ApplicationContext& ctx) {
+        constexpr uint32_t dataPsnTxtInfStartX = 20;
+        constexpr uint32_t dataPsnTxtInfStartY = 40;
+
+        constexpr int minTensorDims = static_cast<int>(
+            (arm::app::DsCnnModel::ms_inputRowsIdx > arm::app::DsCnnModel::ms_inputColsIdx)?
+             arm::app::DsCnnModel::ms_inputRowsIdx : arm::app::DsCnnModel::ms_inputColsIdx);
+
+        KWSOutput output;
+
+        auto& kwsModel = ctx.Get<Model&>("kwsmodel");
+        if (!kwsModel.IsInited()) {
+            printf_err("KWS model has not been initialised\n");
+            return output;
+        }
+
+        const int kwsFrameLength = ctx.Get<int>("kwsframeLength");
+        const int kwsFrameStride = ctx.Get<int>("kwsframeStride");
+        const float kwsScoreThreshold = ctx.Get<float>("kwsscoreThreshold");
+
+        TfLiteTensor* kwsOutputTensor = kwsModel.GetOutputTensor(0);
+        TfLiteTensor* kwsInputTensor = kwsModel.GetInputTensor(0);
+
+        if (!kwsInputTensor->dims) {
+            printf_err("Invalid input tensor dims\n");
+            return output;
+        } else if (kwsInputTensor->dims->size < minTensorDims) {
+            printf_err("Input tensor dimension should be >= %d\n", minTensorDims);
+            return output;
+        }
+
+        const uint32_t kwsNumMfccFeats = ctx.Get<uint32_t>("kwsNumMfcc");
+        const uint32_t kwsNumAudioWindows = ctx.Get<uint32_t>("kwsNumAudioWins");
+
+        audio::DsCnnMFCC kwsMfcc = audio::DsCnnMFCC(kwsNumMfccFeats, kwsFrameLength);
+        kwsMfcc.Init();
+
+        /* Deduce the data length required for 1 KWS inference from the network parameters. */
+        auto kwsAudioDataWindowSize = kwsNumAudioWindows * kwsFrameStride +
+                                        (kwsFrameLength - kwsFrameStride);
+        auto kwsMfccWindowSize = kwsFrameLength;
+        auto kwsMfccWindowStride = kwsFrameStride;
+
+        /* We are choosing to move by half the window size => for a 1 second window size,
+         * this means an overlap of 0.5 seconds. */
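+        /* For example, assuming a 16 kHz sampling rate, a 1 second window is 16000
+         * samples and moving by half of it gives a stride of 8000 samples (0.5 s). */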
+        auto kwsAudioDataStride = kwsAudioDataWindowSize / 2;
+
+        info("KWS audio data window size %u\n", kwsAudioDataWindowSize);
+
+        /* Stride must be multiple of mfcc features window stride to re-use features. */
+        if (0 != kwsAudioDataStride % kwsMfccWindowStride) {
+            kwsAudioDataStride -= kwsAudioDataStride % kwsMfccWindowStride;
+        }
+
+        auto kwsMfccVectorsInAudioStride = kwsAudioDataStride/kwsMfccWindowStride;
+
+        /* We expect to be sampling 1 second worth of data at a time
+         * NOTE: This is only used for time stamp calculation. */
+        const float kwsAudioParamsSecondsPerSample = 1.0/audio::DsCnnMFCC::ms_defaultSamplingFreq;
+
+        auto currentIndex = ctx.Get<uint32_t>("clipIndex");
+
+        /* Creating a mfcc features sliding window for the data required for 1 inference. */
+        auto kwsAudioMFCCWindowSlider = audio::SlidingWindow<const int16_t>(
+                get_audio_array(currentIndex),
+                kwsAudioDataWindowSize, kwsMfccWindowSize,
+                kwsMfccWindowStride);
+
+        /* Creating a sliding window through the whole audio clip. */
+        auto audioDataSlider = audio::SlidingWindow<const int16_t>(
+                get_audio_array(currentIndex),
+                get_audio_array_size(currentIndex),
+                kwsAudioDataWindowSize, kwsAudioDataStride);
+
+        /* Calculate number of the feature vectors in the window overlap region.
+         * These feature vectors will be reused.*/
+        size_t numberOfReusedFeatureVectors = kwsAudioMFCCWindowSlider.TotalStrides() + 1
+                                              - kwsMfccVectorsInAudioStride;
+
+        auto kwsMfccFeatureCalc = GetFeatureCalculator(kwsMfcc, kwsInputTensor,
+                                                       numberOfReusedFeatureVectors);
+
+        if (!kwsMfccFeatureCalc){
+            return output;
+        }
+
+        /* Container for KWS results. */
+        std::vector<arm::app::kws::KwsResult> kwsResults;
+
+        /* Display message on the LCD - inference running. */
+        auto& platform = ctx.Get<hal_platform&>("platform");
+        std::string str_inf{"Running KWS inference... "};
+        platform.data_psn->present_data_text(
+                            str_inf.c_str(), str_inf.size(),
+                            dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+        info("Running KWS inference on audio clip %u => %s\n",
+             currentIndex, get_filename(currentIndex));
+
+        /* Start sliding through audio clip. */
+        while (audioDataSlider.HasNext()) {
+            const int16_t* inferenceWindow = audioDataSlider.Next();
+
+            /* We moved to the next window - set the features sliding to the new address. */
+            kwsAudioMFCCWindowSlider.Reset(inferenceWindow);
+
+            /* The first window does not have cache ready. */
+            bool useCache = audioDataSlider.Index() > 0 && numberOfReusedFeatureVectors > 0;
+
+            /* Start calculating features inside one audio sliding window. */
+            while (kwsAudioMFCCWindowSlider.HasNext()) {
+                const int16_t* kwsMfccWindow = kwsAudioMFCCWindowSlider.Next();
+                std::vector<int16_t> kwsMfccAudioData =
+                    std::vector<int16_t>(kwsMfccWindow, kwsMfccWindow + kwsMfccWindowSize);
+
+                /* Compute features for this window and write them to input tensor. */
+                kwsMfccFeatureCalc(kwsMfccAudioData,
+                                   kwsAudioMFCCWindowSlider.Index(),
+                                   useCache,
+                                   kwsMfccVectorsInAudioStride);
+            }
+
+            info("Inference %zu/%zu\n", audioDataSlider.Index() + 1,
+                 audioDataSlider.TotalStrides() + 1);
+
+            /* Run inference over this audio clip sliding window. */
+            arm::app::RunInference(platform, kwsModel);
+
+            std::vector<ClassificationResult> kwsClassificationResult;
+            auto& kwsClassifier = ctx.Get<KwsClassifier&>("kwsclassifier");
+
+            kwsClassifier.GetClassificationResults(
+                            kwsOutputTensor, kwsClassificationResult,
+                            ctx.Get<std::vector<std::string>&>("kwslabels"), 1);
+
+            kwsResults.emplace_back(
+                kws::KwsResult(
+                    kwsClassificationResult,
+                    audioDataSlider.Index() * kwsAudioParamsSecondsPerSample * kwsAudioDataStride,
+                    audioDataSlider.Index(), kwsScoreThreshold)
+                );
+
+            /* Keyword detected. */
+            if (kwsClassificationResult[0].m_labelIdx == ctx.Get<uint32_t>("keywordindex")) {
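+                /* Hand over to ASR the audio that follows the current KWS window:
+                 * start just after the window and cover the remainder of the clip. */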
+                output.asrAudioStart = inferenceWindow + kwsAudioDataWindowSize;
+                output.asrAudioSamples = get_audio_array_size(currentIndex) -
+                                        (audioDataSlider.NextWindowStartIndex() -
+                                        kwsAudioDataStride + kwsAudioDataWindowSize);
+                break;
+            }
+
+#if VERIFY_TEST_OUTPUT
+            arm::app::DumpTensor(kwsOutputTensor);
+#endif /* VERIFY_TEST_OUTPUT */
+
+        } /* while (audioDataSlider.HasNext()) */
+
+        /* Erase. */
+        str_inf = std::string(str_inf.size(), ' ');
+        platform.data_psn->present_data_text(
+                            str_inf.c_str(), str_inf.size(),
+                            dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+        if (!_PresentInferenceResult(platform, kwsResults)) {
+            return output;
+        }
+
+        output.executionSuccess = true;
+        return output;
+    }
+
+    /**
+     * @brief Performs the ASR pipeline.
+     *
+     * @param[in,out]   ctx         Reference to the application context object.
+     * @param[in]       kwsOutput   Struct containing a pointer to the audio data where ASR
+     *                              should begin and how much data to process.
+     * @return          true if the pipeline executed without failure.
+     */
+    static bool doAsr(ApplicationContext& ctx, const KWSOutput& kwsOutput) {
+        constexpr uint32_t dataPsnTxtInfStartX = 20;
+        constexpr uint32_t dataPsnTxtInfStartY = 40;
+
+        auto& platform = ctx.Get<hal_platform&>("platform");
+        platform.data_psn->clear(COLOR_BLACK);
+
+        /* Get model reference. */
+        auto& asrModel = ctx.Get<Model&>("asrmodel");
+        if (!asrModel.IsInited()) {
+            printf_err("ASR model has not been initialised\n");
+            return false;
+        }
+
+        /* Get score threshold to be applied for the classifier (post-inference). */
+        auto asrScoreThreshold = ctx.Get<float>("asrscoreThreshold");
+
+        /* Dimensions of the tensor should have been verified by the caller. */
+        TfLiteTensor* asrInputTensor = asrModel.GetInputTensor(0);
+        TfLiteTensor* asrOutputTensor = asrModel.GetOutputTensor(0);
+        const uint32_t asrInputRows = asrInputTensor->dims->data[arm::app::Wav2LetterModel::ms_inputRowsIdx];
+
+        /* Populate ASR MFCC related parameters. */
+        auto asrMfccParamsWinLen = ctx.Get<uint32_t>("asrframeLength");
+        auto asrMfccParamsWinStride = ctx.Get<uint32_t>("asrframeStride");
+
+        /* Populate ASR inference context and inner lengths for input. */
+        auto asrInputCtxLen = ctx.Get<uint32_t>("ctxLen");
+        const uint32_t asrInputInnerLen = asrInputRows - (2 * asrInputCtxLen);
+
+        /* Make sure the input tensor supports the above context and inner lengths. */
+        if (asrInputRows <= 2 * asrInputCtxLen || asrInputRows <= asrInputInnerLen) {
+            printf_err("ASR input rows not compatible with ctx length %u\n", asrInputCtxLen);
+            return false;
+        }
+
+        /* Audio data stride corresponds to inputInnerLen feature vectors. */
+        const uint32_t asrAudioParamsWinLen = (asrInputRows - 1) *
+                                              asrMfccParamsWinStride + (asrMfccParamsWinLen);
+        const uint32_t asrAudioParamsWinStride = asrInputInnerLen * asrMfccParamsWinStride;
+        const float asrAudioParamsSecondsPerSample =
+                                        (1.0/audio::Wav2LetterMFCC::ms_defaultSamplingFreq);
+
+        /* Get pre/post-processing objects */
+        auto& asrPrep = ctx.Get<audio::asr::Preprocess&>("preprocess");
+        auto& asrPostp = ctx.Get<audio::asr::Postprocess&>("postprocess");
+
+        /* Set default reduction axis for post-processing. */
+        const uint32_t reductionAxis = arm::app::Wav2LetterModel::ms_outputRowsIdx;
+
+        /* Get the remaining audio buffer and respective size from KWS results. */
+        const int16_t* audioArr = kwsOutput.asrAudioStart;
+        const uint32_t audioArrSize = kwsOutput.asrAudioSamples;
+
+        /* Audio clip must have enough samples to produce 1 MFCC feature. */
+        std::vector<int16_t> audioBuffer = std::vector<int16_t>(audioArr, audioArr + audioArrSize);
+        if (audioArrSize < asrMfccParamsWinLen) {
+            printf_err("Not enough audio samples, minimum needed is %u\n", asrMfccParamsWinLen);
+            return false;
+        }
+
+        /* Initialise an audio slider. */
+        auto audioDataSlider = audio::ASRSlidingWindow<const int16_t>(
+                audioBuffer.data(),
+                audioBuffer.size(),
+                asrAudioParamsWinLen,
+                asrAudioParamsWinStride);
+
+        /* Declare a container for results. */
+        std::vector<arm::app::asr::AsrResult> asrResults;
+
+        /* Display message on the LCD - inference running. */
+        std::string str_inf{"Running ASR inference... "};
+        platform.data_psn->present_data_text(
+                str_inf.c_str(), str_inf.size(),
+                dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+        size_t asrInferenceWindowLen = asrAudioParamsWinLen;
+
+        /* Start sliding through audio clip. */
+        while (audioDataSlider.HasNext()) {
+
+            /* If not enough audio see how much can be sent for processing. */
+            size_t nextStartIndex = audioDataSlider.NextWindowStartIndex();
+            if (nextStartIndex + asrAudioParamsWinLen > audioBuffer.size()) {
+                asrInferenceWindowLen = audioBuffer.size() - nextStartIndex;
+            }
+
+            const int16_t* asrInferenceWindow = audioDataSlider.Next();
+
+            info("Inference %zu/%zu\n", audioDataSlider.Index() + 1,
+                static_cast<size_t>(ceilf(audioDataSlider.FractionalTotalStrides() + 1)));
+
+            Profiler prepProfiler{&platform, "pre-processing"};
+            prepProfiler.StartProfiling();
+
+            /* Calculate MFCCs, deltas and populate the input tensor. */
+            asrPrep.Invoke(asrInferenceWindow, asrInferenceWindowLen, asrInputTensor);
+
+            prepProfiler.StopProfiling();
+            std::string prepProfileResults = prepProfiler.GetResultsAndReset();
+            info("%s\n", prepProfileResults.c_str());
+
+            /* Run inference over this audio clip sliding window. */
+            arm::app::RunInference(platform, asrModel);
+
+            /* Post-process. */
+            asrPostp.Invoke(asrOutputTensor, reductionAxis, !audioDataSlider.HasNext());
+
+            /* Get results. */
+            std::vector<ClassificationResult> asrClassificationResult;
+            auto& asrClassifier = ctx.Get<AsrClassifier&>("asrclassifier");
+            asrClassifier.GetClassificationResults(
+                    asrOutputTensor, asrClassificationResult,
+                    ctx.Get<std::vector<std::string>&>("asrlabels"), 1);
+
+            asrResults.emplace_back(asr::AsrResult(asrClassificationResult,
+                                                (audioDataSlider.Index() *
+                                                 asrAudioParamsSecondsPerSample *
+                                                 asrAudioParamsWinStride),
+                                                 audioDataSlider.Index(), asrScoreThreshold));
+
+#if VERIFY_TEST_OUTPUT
+            arm::app::DumpTensor(asrOutputTensor, asrOutputTensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx]);
+#endif /* VERIFY_TEST_OUTPUT */
+
+            /* Erase */
+            str_inf = std::string(str_inf.size(), ' ');
+            platform.data_psn->present_data_text(
+                        str_inf.c_str(), str_inf.size(),
+                        dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);
+        }
+        if (!_PresentInferenceResult(platform, asrResults)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    /* Audio inference classification handler. */
+    bool ClassifyAudioHandler(ApplicationContext& ctx, uint32_t clipIndex, bool runAll)
+    {
+        auto& platform = ctx.Get<hal_platform&>("platform");
+        platform.data_psn->clear(COLOR_BLACK);
+
+        /* If the request has a valid size, set the audio index. */
+        if (clipIndex < NUMBER_OF_FILES) {
+            if (!_SetAppCtxClipIdx(ctx, clipIndex)) {
+                return false;
+            }
+        }
+
+        auto startClipIdx = ctx.Get<uint32_t>("clipIndex");
+
+        do {
+            KWSOutput kwsOutput = doKws(ctx);
+            if (!kwsOutput.executionSuccess) {
+                return false;
+            }
+
+            if (kwsOutput.asrAudioStart != nullptr && kwsOutput.asrAudioSamples > 0) {
+                info("Keyword spotted\n");
+                if(!doAsr(ctx, kwsOutput)) {
+                    printf_err("ASR failed");
+                    return false;
+                }
+            }
+
+            _IncrementAppCtxClipIdx(ctx);
+
+        } while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);
+
+        return true;
+    }
+
+    static void _IncrementAppCtxClipIdx(ApplicationContext& ctx)
+    {
+        auto curAudioIdx = ctx.Get<uint32_t>("clipIndex");
+
+        if (curAudioIdx + 1 >= NUMBER_OF_FILES) {
+            ctx.Set<uint32_t>("clipIndex", 0);
+            return;
+        }
+        ++curAudioIdx;
+        ctx.Set<uint32_t>("clipIndex", curAudioIdx);
+    }
+
+    static bool _SetAppCtxClipIdx(ApplicationContext& ctx, const uint32_t idx)
+    {
+        if (idx >= NUMBER_OF_FILES) {
+            printf_err("Invalid idx %u (expected less than %u)\n",
+                idx, NUMBER_OF_FILES);
+            return false;
+        }
+        ctx.Set<uint32_t>("clipIndex", idx);
+        return true;
+    }
+
+    static bool _PresentInferenceResult(hal_platform& platform,
+                std::vector<arm::app::kws::KwsResult>& results)
+    {
+        constexpr uint32_t dataPsnTxtStartX1 = 20;
+        constexpr uint32_t dataPsnTxtStartY1 = 30;
+        constexpr uint32_t dataPsnTxtYIncr   = 16;  /* Row index increment. */
+
+        platform.data_psn->set_text_color(COLOR_GREEN);
+
+        /* Display each result. */
+        uint32_t rowIdx1 = dataPsnTxtStartY1 + 2 * dataPsnTxtYIncr;
+
+        for (uint32_t i = 0; i < results.size(); ++i) {
+
+            std::string topKeyword{"<none>"};
+            float score = 0.f;
+
+            if (results[i].m_resultVec.size()) {
+                topKeyword = results[i].m_resultVec[0].m_label;
+                score = results[i].m_resultVec[0].m_normalisedVal;
+            }
+
+            std::string resultStr =
+                    std::string{"@"} + std::to_string(results[i].m_timeStamp) +
+                    std::string{"s: "} + topKeyword + std::string{" ("} +
+                    std::to_string(static_cast<int>(score * 100)) + std::string{"%)"};
+
+            platform.data_psn->present_data_text(
+                        resultStr.c_str(), resultStr.size(),
+                        dataPsnTxtStartX1, rowIdx1, 0);
+            rowIdx1 += dataPsnTxtYIncr;
+
+            info("For timestamp: %f (inference #: %u); threshold: %f\n",
+                 results[i].m_timeStamp, results[i].m_inferenceNumber,
+                 results[i].m_threshold);
+            for (uint32_t j = 0; j < results[i].m_resultVec.size(); ++j) {
+                info("\t\tlabel @ %u: %s, score: %f\n", j,
+                     results[i].m_resultVec[j].m_label.c_str(),
+                     results[i].m_resultVec[j].m_normalisedVal);
+            }
+        }
+
+        return true;
+    }
+
+    static bool _PresentInferenceResult(hal_platform& platform, std::vector<arm::app::asr::AsrResult>& results)
+    {
+        constexpr uint32_t dataPsnTxtStartX1 = 20;
+        constexpr uint32_t dataPsnTxtStartY1 = 80;
+        constexpr bool allow_multiple_lines = true;
+
+        platform.data_psn->set_text_color(COLOR_GREEN);
+
+        /* Results from multiple inferences should be combined before processing. */
+        std::vector<arm::app::ClassificationResult> combinedResults;
+        for (auto& result : results) {
+            combinedResults.insert(combinedResults.end(),
+                                   result.m_resultVec.begin(),
+                                   result.m_resultVec.end());
+        }
+
+        for (auto& result : results) {
+            /* Get the final result string using the decoder. */
+            std::string infResultStr = audio::asr::DecodeOutput(result.m_resultVec);
+
+            info("Result for inf %u: %s\n", result.m_inferenceNumber,
+                 infResultStr.c_str());
+        }
+
+        std::string finalResultStr = audio::asr::DecodeOutput(combinedResults);
+
+        platform.data_psn->present_data_text(
+                    finalResultStr.c_str(), finalResultStr.size(),
+                    dataPsnTxtStartX1, dataPsnTxtStartY1, allow_multiple_lines);
+
+        info("Final result: %s\n", finalResultStr.c_str());
+        return true;
+    }
+
+    /**
+     * @brief Generic feature calculator factory.
+     *
+     * Returns lambda function to compute features using features cache.
+     * Real features math is done by a lambda function provided as a parameter.
+     * Features are written to input tensor memory.
+     *
+     * @tparam T            feature vector type.
+     * @param inputTensor   model input tensor pointer.
+     * @param cacheSize     number of feature vectors to cache. Defined by the sliding window overlap.
+     * @param compute       features calculator function.
+     * @return              lambda function to compute features.
+     **/
+    template<class T>
+    std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>
+    _FeatureCalc(TfLiteTensor* inputTensor, size_t cacheSize,
+                 std::function<std::vector<T> (std::vector<int16_t>& )> compute)
+    {
+        /* Feature cache to be captured by lambda function. */
+        static std::vector<std::vector<T>> featureCache = std::vector<std::vector<T>>(cacheSize);
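+        /* Note: being 'static', one cache per template type T persists for the whole
+         * program and is shared by every calculator instantiated with that type. */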
+
+        return [=](std::vector<int16_t>& audioDataWindow,
+                   size_t index,
+                   bool useCache,
+                   size_t featuresOverlapIndex)
+        {
+            T* tensorData = tflite::GetTensorData<T>(inputTensor);
+            std::vector<T> features;
+
+            /* Reuse features from the cache if the cache is ready and the sliding windows overlap.
+             * The overlap sits at the beginning of the sliding window and spans the size of the
+             * feature cache. */
+            if (useCache && index < featureCache.size()) {
+                features = std::move(featureCache[index]);
+            } else {
+                features = std::move(compute(audioDataWindow));
+            }
+            auto size = features.size();
+            auto sizeBytes = sizeof(T) * size;
+            std::memcpy(tensorData + (index * size), features.data(), sizeBytes);
+
+            /* Start renewing the cache as soon as the iteration goes past the window overlap. */
+            if (index >= featuresOverlapIndex) {
+                featureCache[index - featuresOverlapIndex] = std::move(features);
+            }
+        };
+    }
+
+    template std::function<void (std::vector<int16_t>&, size_t , bool, size_t)>
+    _FeatureCalc<int8_t>(TfLiteTensor* inputTensor,
+                         size_t cacheSize,
+                         std::function<std::vector<int8_t> (std::vector<int16_t>& )> compute);
+
+    template std::function<void (std::vector<int16_t>&, size_t , bool, size_t)>
+    _FeatureCalc<uint8_t>(TfLiteTensor* inputTensor,
+                          size_t cacheSize,
+                          std::function<std::vector<uint8_t> (std::vector<int16_t>& )> compute);
+
+    template std::function<void (std::vector<int16_t>&, size_t , bool, size_t)>
+    _FeatureCalc<int16_t>(TfLiteTensor* inputTensor,
+                          size_t cacheSize,
+                          std::function<std::vector<int16_t> (std::vector<int16_t>& )> compute);
+
+    template std::function<void(std::vector<int16_t>&, size_t, bool, size_t)>
+    _FeatureCalc<float>(TfLiteTensor* inputTensor,
+                        size_t cacheSize,
+                        std::function<std::vector<float>(std::vector<int16_t>&)> compute);
+
+
+    static std::function<void (std::vector<int16_t>&, int, bool, size_t)>
+    GetFeatureCalculator(audio::DsCnnMFCC& mfcc, TfLiteTensor* inputTensor, size_t cacheSize)
+    {
+        std::function<void (std::vector<int16_t>&, size_t, bool, size_t)> mfccFeatureCalc;
+
+        TfLiteQuantization quant = inputTensor->quantization;
+
+        if (kTfLiteAffineQuantization == quant.type) {
+
+            auto* quantParams = (TfLiteAffineQuantization*) quant.params;
+            const float quantScale = quantParams->scale->data[0];
+            const int quantOffset = quantParams->zero_point->data[0];
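+            /* TFLite affine quantisation maps real values as q = r / scale + zero_point;
+             * the scale and zero point above are passed on to the quantised MFCC compute. */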
+
+            switch (inputTensor->type) {
+                case kTfLiteInt8: {
+                    mfccFeatureCalc = _FeatureCalc<int8_t>(inputTensor,
+                                                           cacheSize,
+                                                           [=, &mfcc](std::vector<int16_t>& audioDataWindow) {
+                                                               return mfcc.MfccComputeQuant<int8_t>(audioDataWindow,
+                                                                                                    quantScale,
+                                                                                                    quantOffset);
+                                                           }
+                    );
+                    break;
+                }
+                case kTfLiteUInt8: {
+                    mfccFeatureCalc = _FeatureCalc<uint8_t>(inputTensor,
+                                                            cacheSize,
+                                                            [=, &mfcc](std::vector<int16_t>& audioDataWindow) {
+                                                                return mfcc.MfccComputeQuant<uint8_t>(audioDataWindow,
+                                                                                                      quantScale,
+                                                                                                      quantOffset);
+                                                            }
+                    );
+                    break;
+                }
+                case kTfLiteInt16: {
+                    mfccFeatureCalc = _FeatureCalc<int16_t>(inputTensor,
+                                                            cacheSize,
+                                                            [=, &mfcc](std::vector<int16_t>& audioDataWindow) {
+                                                                return mfcc.MfccComputeQuant<int16_t>(audioDataWindow,
+                                                                                                      quantScale,
+                                                                                                      quantOffset);
+                                                            }
+                    );
+                    break;
+                }
+                default:
+                printf_err("Tensor type %s not supported\n", TfLiteTypeGetName(inputTensor->type));
+            }
+
+        } else {
+            mfccFeatureCalc = _FeatureCalc<float>(inputTensor,
+                                                  cacheSize,
+                                                  [&mfcc](std::vector<int16_t>& audioDataWindow) {
+                                                      return mfcc.MfccCompute(audioDataWindow);
+                                                  });
+        }
+        return mfccFeatureCalc;
+    }
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/src/Wav2LetterMfcc.cc b/source/use_case/kws_asr/src/Wav2LetterMfcc.cc
new file mode 100644
index 0000000..80e4a26
--- /dev/null
+++ b/source/use_case/kws_asr/src/Wav2LetterMfcc.cc
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterMfcc.hpp"
+
+#include "PlatformMath.hpp"
+
+#include <cfloat>
+
+namespace arm {
+namespace app {
+namespace audio {
+
+    bool Wav2LetterMFCC::ApplyMelFilterBank(
+            std::vector<float>&                 fftVec,
+            std::vector<std::vector<float>>&    melFilterBank,
+            std::vector<int32_t>&               filterBankFilterFirst,
+            std::vector<int32_t>&               filterBankFilterLast,
+            std::vector<float>&                 melEnergies)
+    {
+        const size_t numBanks = melEnergies.size();
+
+        if (numBanks != filterBankFilterFirst.size() ||
+                numBanks != filterBankFilterLast.size()) {
+            printf_err("unexpected filter bank lengths\n");
+            return false;
+        }
+
+        for (size_t bin = 0; bin < numBanks; ++bin) {
+            auto filterBankIter = melFilterBank[bin].begin();
+            float melEnergy = 1e-10;  /* Avoid log of zero at later stages, same value used in librosa. */
+            const int32_t firstIndex = filterBankFilterFirst[bin];
+            const int32_t lastIndex = filterBankFilterLast[bin];
+
+            for (int32_t i = firstIndex; i <= lastIndex; ++i) {
+                melEnergy += (*filterBankIter++ * fftVec[i]);
+            }
+
+            melEnergies[bin] = melEnergy;
+        }
+
+        return true;
+    }
+
+    void Wav2LetterMFCC::ConvertToLogarithmicScale(
+                            std::vector<float>& melEnergies)
+    {
+        float maxMelEnergy = -FLT_MAX;
+
+        /* Container for natural logarithms of mel energies. */
+        std::vector <float> vecLogEnergies(melEnergies.size(), 0.f);
+
+        /* Because we are taking natural logs, we need to multiply by log10(e).
+         * Also, for wav2letter model, we scale our log10 values by 10. */
+        constexpr float multiplier = 10.0 *  /* Default scalar. */
+                                      0.4342944819032518;  /* log10f(std::exp(1.0))*/
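+        /* i.e. 10 * log10(x) = 10 * log10(e) * ln(x); scaling the natural logs by this
+         * multiplier converts the mel energies to a decibel-style scale. */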
+
+        /* Take log of the whole vector. */
+        math::MathUtils::VecLogarithmF32(melEnergies, vecLogEnergies);
+
+        /* Scale the log values and get the max. */
+        for (auto iterM = melEnergies.begin(), iterL = vecLogEnergies.begin();
+                  iterM != melEnergies.end(); ++iterM, ++iterL) {
+
+            *iterM = *iterL * multiplier;
+
+            /* Save the max mel energy. */
+            if (*iterM > maxMelEnergy) {
+                maxMelEnergy = *iterM;
+            }
+        }
+
+        /* Clamp the mel energies. */
+        constexpr float maxDb = 80.0;
+        const float clampLevelLowdB = maxMelEnergy - maxDb;
+        for (auto iter = melEnergies.begin(); iter != melEnergies.end(); ++iter) {
+            *iter = std::max(*iter, clampLevelLowdB);
+        }
+    }
+
+    std::vector<float> Wav2LetterMFCC::CreateDCTMatrix(
+                                        const int32_t inputLength,
+                                        const int32_t coefficientCount)
+    {
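+        /* This builds an orthonormal DCT-II matrix:
+         *   dct[k][n] = s(k) * cos(pi * k * (n + 0.5) / inputLength),
+         * with s(0) = 2 * sqrt(1 / (4 * inputLength)) and
+         *      s(k > 0) = 2 * sqrt(1 / (2 * inputLength)). */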
+        std::vector<float> dctMatrix(inputLength * coefficientCount);
+
+        /* Orthonormal normalization. */
+        const float normalizerK0 = 2 * math::MathUtils::SqrtF32(1.0f /
+                                        static_cast<float>(4*inputLength));
+        const float normalizer = 2 * math::MathUtils::SqrtF32(1.0f /
+                                        static_cast<float>(2*inputLength));
+
+        const float angleIncr = M_PI/inputLength;
+        float angle = angleIncr;  /* We start using it at k = 1 loop. */
+
+        /* First row of DCT will use normalizer K0 */
+        for (int32_t n = 0; n < inputLength; ++n) {
+            dctMatrix[n] = normalizerK0  /* cos(0) = 1 */;
+        }
+
+        /* Second row (index = 1) onwards, we use standard normalizer. */
+        for (int32_t k = 1, m = inputLength; k < coefficientCount; ++k, m += inputLength) {
+            for (int32_t n = 0; n < inputLength; ++n) {
+                dctMatrix[m+n] = normalizer *
+                    math::MathUtils::CosineF32((n + 0.5f) * angle);
+            }
+            angle += angleIncr;
+        }
+        return dctMatrix;
+    }
+
+    float Wav2LetterMFCC::GetMelFilterBankNormaliser(
+                                    const float&    leftMel,
+                                    const float&    rightMel,
+                                    const bool      useHTKMethod)
+    {
+        /* Slaney normalization for mel weights. */
+        return (2.0f / (MFCC::InverseMelScale(rightMel, useHTKMethod) -
+                MFCC::InverseMelScale(leftMel, useHTKMethod)));
+    }
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/kws_asr/src/Wav2LetterModel.cc b/source/use_case/kws_asr/src/Wav2LetterModel.cc
new file mode 100644
index 0000000..2114a3f
--- /dev/null
+++ b/source/use_case/kws_asr/src/Wav2LetterModel.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterModel.hpp"
+
+#include "hal.h"
+
+namespace arm {
+namespace app {
+namespace asr {
+    extern uint8_t* GetModelPointer();
+    extern size_t GetModelLen();
+} /* namespace asr */
+} /* namespace app */
+} /* namespace arm */
+
+const tflite::MicroOpResolver& arm::app::Wav2LetterModel::GetOpResolver()
+{
+    return this->_m_opResolver;
+}
+
+bool arm::app::Wav2LetterModel::EnlistOperations()
+{
+    this->_m_opResolver.AddConv2D();
+    this->_m_opResolver.AddMul();
+    this->_m_opResolver.AddMaximum();
+    this->_m_opResolver.AddReshape();
+
+#if defined(ARM_NPU)
+    if (kTfLiteOk == this->_m_opResolver.AddEthosU()) {
+        info("Added %s support to op resolver\n",
+            tflite::GetString_ETHOSU());
+    } else {
+        printf_err("Failed to add Arm NPU support to op resolver.");
+        return false;
+    }
+#endif /* ARM_NPU */
+    return true;
+}
+
+const uint8_t* arm::app::Wav2LetterModel::ModelPointer()
+{
+    return arm::app::asr::GetModelPointer();
+}
+
+size_t arm::app::Wav2LetterModel::ModelSize()
+{
+    return arm::app::asr::GetModelLen();
+}
\ No newline at end of file
diff --git a/source/use_case/kws_asr/src/Wav2LetterPostprocess.cc b/source/use_case/kws_asr/src/Wav2LetterPostprocess.cc
new file mode 100644
index 0000000..b173968
--- /dev/null
+++ b/source/use_case/kws_asr/src/Wav2LetterPostprocess.cc
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPostprocess.hpp"
+
+#include "Wav2LetterModel.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+namespace asr {
+
+    Postprocess::Postprocess(const uint32_t contextLen,
+                             const uint32_t innerLen,
+                             const uint32_t blankTokenIdx)
+        :   _m_contextLen(contextLen),
+            _m_innerLen(innerLen),
+            _m_totalLen(2 * this->_m_contextLen + this->_m_innerLen),
+            _m_countIterations(0),
+            _m_blankTokenIdx(blankTokenIdx)
+    {}
+
+    bool Postprocess::Invoke(TfLiteTensor*  tensor,
+                            const uint32_t  axisIdx,
+                            const bool      lastIteration)
+    {
+        /* Basic checks. */
+        if (!this->_IsInputValid(tensor, axisIdx)) {
+            return false;
+        }
+
+        /* Irrespective of tensor type, we use unsigned "byte" */
+        uint8_t* ptrData = tflite::GetTensorData<uint8_t>(tensor);
+        const uint32_t elemSz = this->_GetTensorElementSize(tensor);
+
+        /* Other sanity checks. */
+        if (0 == elemSz) {
+            printf_err("Tensor type not supported for post processing\n");
+            return false;
+        } else if (elemSz * this->_m_totalLen > tensor->bytes) {
+            printf_err("Insufficient number of tensor bytes\n");
+            return false;
+        }
+
+        /* Which axis do we need to process? */
+        switch (axisIdx) {
+            case arm::app::Wav2LetterModel::ms_outputRowsIdx:
+                return this->_EraseSectionsRowWise(ptrData,
+                        elemSz * tensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx],
+                        lastIteration);
+            default:
+                printf_err("Unsupported axis index: %u\n", axisIdx);
+        }
+
+        return false;
+    }
+
+    bool Postprocess::_IsInputValid(TfLiteTensor*  tensor,
+                                    const uint32_t axisIdx) const
+    {
+        if (nullptr == tensor) {
+            return false;
+        }
+
+        if (static_cast<int>(axisIdx) >= tensor->dims->size) {
+            printf_err("Invalid axis index: %u; Max: %d\n",
+                axisIdx, tensor->dims->size);
+            return false;
+        }
+
+        if (static_cast<int>(this->_m_totalLen) !=
+                             tensor->dims->data[axisIdx]) {
+            printf_err("Unexpected tensor dimension for axis %d, \n",
+                tensor->dims->data[axisIdx]);
+            return false;
+        }
+
+        return true;
+    }
+
+    uint32_t Postprocess::_GetTensorElementSize(TfLiteTensor*  tensor)
+    {
+        switch(tensor->type) {
+            case kTfLiteUInt8:
+                return 1;
+            case kTfLiteInt8:
+                return 1;
+            case kTfLiteInt16:
+                return 2;
+            case kTfLiteInt32:
+                return 4;
+            case kTfLiteFloat32:
+                return 4;
+            default:
+                printf_err("Unsupported tensor type %s\n",
+                    TfLiteTypeGetName(tensor->type));
+        }
+
+        return 0;
+    }
+
+    bool Postprocess::_EraseSectionsRowWise(
+                        uint8_t*         ptrData,
+                        const uint32_t   strideSzBytes,
+                        const bool       lastIteration)
+    {
+        /* In this case, the "zero-ing" is quite simple as the region
+         * to be zeroed sits in contiguous memory (row-major). */
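+        /* Output layout along the (row-major) time axis, in frames:
+         *
+         *   [ left context | inner section | right context ]
+         *     _m_contextLen   _m_innerLen     _m_contextLen
+         *
+         * The context frames are overwritten with the blank token below:
+         * the left context on every window except the first, and the right
+         * context on every window except the last. */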
+        const uint32_t eraseLen = strideSzBytes * this->_m_contextLen;
+
+        /* Erase left context? */
+        if (this->_m_countIterations > 0) {
+            /* Set output of each classification window to the blank token. */
+            std::memset(ptrData, 0, eraseLen);
+            for (size_t windowIdx = 0; windowIdx < this->_m_contextLen; windowIdx++) {
+                ptrData[windowIdx*strideSzBytes + this->_m_blankTokenIdx] = 1;
+            }
+        }
+
+        /* Erase right context? */
+        if (false == lastIteration) {
+            uint8_t * rightCtxPtr = ptrData + (strideSzBytes * (this->_m_contextLen + this->_m_innerLen));
+            /* Set output of each classification window to the blank token. */
+            std::memset(rightCtxPtr, 0, eraseLen);
+            for (size_t windowIdx = 0; windowIdx < this->_m_contextLen; windowIdx++) {
+                rightCtxPtr[windowIdx*strideSzBytes + this->_m_blankTokenIdx] = 1;
+            }
+        }
+
+        if (lastIteration) {
+            this->_m_countIterations = 0;
+        } else {
+            ++this->_m_countIterations;
+        }
+
+        return true;
+    }
+
+} /* namespace asr */
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/src/Wav2LetterPreprocess.cc b/source/use_case/kws_asr/src/Wav2LetterPreprocess.cc
new file mode 100644
index 0000000..613ddb0
--- /dev/null
+++ b/source/use_case/kws_asr/src/Wav2LetterPreprocess.cc
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPreprocess.hpp"
+
+#include "PlatformMath.hpp"
+#include "TensorFlowLiteMicro.hpp"
+
+#include <algorithm>
+#include <math.h>
+
+namespace arm {
+namespace app {
+namespace audio {
+namespace asr {
+
+    Preprocess::Preprocess(
+        const uint32_t  numMfccFeatures,
+        const uint32_t  windowLen,
+        const uint32_t  windowStride,
+        const uint32_t  numMfccVectors):
+            _m_mfcc(numMfccFeatures, windowLen),
+            _m_mfccBuf(numMfccFeatures, numMfccVectors),
+            _m_delta1Buf(numMfccFeatures, numMfccVectors),
+            _m_delta2Buf(numMfccFeatures, numMfccVectors),
+            _m_windowLen(windowLen),
+            _m_windowStride(windowStride),
+            _m_numMfccFeats(numMfccFeatures),
+            _m_numFeatVectors(numMfccVectors),
+            _m_window()
+    {
+        if (numMfccFeatures > 0 && windowLen > 0) {
+            this->_m_mfcc.Init();
+        }
+    }
+
+    bool Preprocess::Invoke(
+                const int16_t*  audioData,
+                const uint32_t  audioDataLen,
+                TfLiteTensor*   tensor)
+    {
+        this->_m_window = SlidingWindow<const int16_t>(
+                            audioData, audioDataLen,
+                            this->_m_windowLen, this->_m_windowStride);
+
+        uint32_t mfccBufIdx = 0;
+
+        std::fill(_m_mfccBuf.begin(), _m_mfccBuf.end(), 0.f);
+        std::fill(_m_delta1Buf.begin(), _m_delta1Buf.end(), 0.f);
+        std::fill(_m_delta2Buf.begin(), _m_delta2Buf.end(), 0.f);
+
+        /* While we can slide over the window. */
+        while (this->_m_window.HasNext()) {
+            const int16_t*  mfccWindow = this->_m_window.Next();
+            auto mfccAudioData = std::vector<int16_t>(
+                                        mfccWindow,
+                                        mfccWindow + this->_m_windowLen);
+            auto mfcc = this->_m_mfcc.MfccCompute(mfccAudioData);
+            for (size_t i = 0; i < this->_m_mfccBuf.size(0); ++i) {
+                this->_m_mfccBuf(i, mfccBufIdx) = mfcc[i];
+            }
+            ++mfccBufIdx;
+        }
+
+        /* Pad MFCC if needed by adding MFCC for zeros. */
+        if (mfccBufIdx != this->_m_numFeatVectors) {
+            std::vector<int16_t> zerosWindow = std::vector<int16_t>(this->_m_windowLen, 0);
+            std::vector<float> mfccZeros = this->_m_mfcc.MfccCompute(zerosWindow);
+
+            while (mfccBufIdx != this->_m_numFeatVectors) {
+                memcpy(&this->_m_mfccBuf(0, mfccBufIdx),
+                       mfccZeros.data(), sizeof(float) * _m_numMfccFeats);
+                ++mfccBufIdx;
+            }
+        }
+
+        /* Compute first and second order deltas from MFCCs. */
+        this->_ComputeDeltas(this->_m_mfccBuf,
+                             this->_m_delta1Buf,
+                             this->_m_delta2Buf);
+
+        /* Normalise. */
+        this->_Normalise();
+
+        /* Quantise. */
+        QuantParams quantParams = GetTensorQuantParams(tensor);
+
+        if (0 == quantParams.scale) {
+            printf_err("Quantisation scale can't be 0\n");
+            return false;
+        }
+
+        switch(tensor->type) {
+            case kTfLiteUInt8:
+                return this->_Quantise<uint8_t>(
+                        tflite::GetTensorData<uint8_t>(tensor), tensor->bytes,
+                        quantParams.scale, quantParams.offset);
+            case kTfLiteInt8:
+                return this->_Quantise<int8_t>(
+                        tflite::GetTensorData<int8_t>(tensor), tensor->bytes,
+                        quantParams.scale, quantParams.offset);
+            default:
+                printf_err("Unsupported tensor type %s\n",
+                    TfLiteTypeGetName(tensor->type));
+        }
+
+        return false;
+    }
+
+    bool Preprocess::_ComputeDeltas(Array2d<float>& mfcc,
+                                    Array2d<float>& delta1,
+                                    Array2d<float>& delta2)
+    {
+        const std::vector <float> delta1Coeffs =
+            {6.66666667e-02,  5.00000000e-02,  3.33333333e-02,
+             1.66666667e-02, -3.46944695e-18, -1.66666667e-02,
+            -3.33333333e-02, -5.00000000e-02, -6.66666667e-02};
+
+        const std::vector <float> delta2Coeffs =
+            {0.06060606,      0.01515152,     -0.01731602,
+            -0.03679654,     -0.04329004,     -0.03679654,
+            -0.01731602,      0.01515152,      0.06060606};
+
+        if (delta1.size(0) == 0 || delta2.size(0) != delta1.size(0) ||
+            mfcc.size(0) == 0 || mfcc.size(1) == 0) {
+            return false;
+        }
+
+        /* Get the middle index; coeff vec len should always be odd. */
+        const size_t coeffLen = delta1Coeffs.size();
+        const size_t fMidIdx = (coeffLen - 1)/2;
+        const size_t numFeatures = mfcc.size(0);
+        const size_t numFeatVectors = mfcc.size(1);
+
+        /* Iterate through features in MFCC vector. */
+        for (size_t i = 0; i < numFeatures; ++i) {
+            /* For each feature, iterate through time (t) samples representing feature evolution and
+             * calculate d/dt and d^2/dt^2, using 1d convolution with differential kernels.
+             * Convolution padding = valid, result size is `time length - kernel length + 1`.
+             * The result is zero-padded on both sides to match the length of the initial time-sample data.
+             *
+             * For the small filter, conv1d implementation as a simple loop is efficient enough.
+             * Filters of a greater size would need CMSIS-DSP functions to be used, like arm_fir_f32.
+             */
+
+            for (size_t j = fMidIdx; j < numFeatVectors - fMidIdx; ++j) {
+                float d1 = 0;
+                float d2 = 0;
+                const size_t mfccStIdx = j - fMidIdx;
+
+                for (size_t k = 0, m = coeffLen - 1; k < coeffLen; ++k, --m) {
+
+                    d1 +=  mfcc(i,mfccStIdx + k) * delta1Coeffs[m];
+                    d2 +=  mfcc(i,mfccStIdx + k) * delta2Coeffs[m];
+                }
+
+                delta1(i,j) = d1;
+                delta2(i,j) = d2;
+            }
+        }
+
+        return true;
+    }
+
+    float Preprocess::_GetMean(Array2d<float>& vec)
+    {
+        return math::MathUtils::MeanF32(vec.begin(), vec.totalSize());
+    }
+
+    float Preprocess::_GetStdDev(Array2d<float>& vec, const float mean)
+    {
+        return math::MathUtils::StdDevF32(vec.begin(), vec.totalSize(), mean);
+    }
+
+    void Preprocess::_NormaliseVec(Array2d<float>& vec)
+    {
+        auto mean = Preprocess::_GetMean(vec);
+        auto stddev = Preprocess::_GetStdDev(vec, mean);
+
+        debug("Mean: %f, Stddev: %f\n", mean, stddev);
+        if (stddev == 0) {
+            std::fill(vec.begin(), vec.end(), 0);
+        } else {
+            const float stddevInv = 1.f/stddev;
+            const float normalisedMean = mean/stddev;
+
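+            /* (value - mean) / stddev == value * (1/stddev) - (mean / stddev),
+             * so each element needs only one multiply and one subtract. */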
+            auto NormalisingFunction = [=](float& value) {
+                value = value * stddevInv - normalisedMean;
+            };
+            std::for_each(vec.begin(), vec.end(), NormalisingFunction);
+        }
+    }
+
+    void Preprocess::_Normalise()
+    {
+        Preprocess::_NormaliseVec(this->_m_mfccBuf);
+        Preprocess::_NormaliseVec(this->_m_delta1Buf);
+        Preprocess::_NormaliseVec(this->_m_delta2Buf);
+    }
+
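+    /* Affine quantisation of a single element:
+     *   q = clamp(round(elem / quantScale) + quantOffset, minVal, maxVal)
+     * where minVal and maxVal are the limits of the target integer type. */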
+    float Preprocess::_GetQuantElem(
+                const float     elem,
+                const float     quantScale,
+                const int       quantOffset,
+                const float     minVal,
+                const float     maxVal)
+    {
+        float val = std::round((elem/quantScale) + quantOffset);
+        return std::min<float>(std::max<float>(val, minVal), maxVal);
+    }
+
+} /* namespace asr */
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/use_case/kws_asr/usecase.cmake b/source/use_case/kws_asr/usecase.cmake
new file mode 100644
index 0000000..f15bc73
--- /dev/null
+++ b/source/use_case/kws_asr/usecase.cmake
@@ -0,0 +1,259 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# If the path to a directory or source file has been defined,
+# get the type here (FILEPATH or PATH):
+if (DEFINED ${use_case}_FILE_PATH)
+    get_path_type(${${use_case}_FILE_PATH} PATH_TYPE)
+
+    # Fail if the provided path is neither a directory nor a file path
+    if (NOT ${PATH_TYPE} STREQUAL PATH AND NOT ${PATH_TYPE} STREQUAL FILEPATH)
+        message(FATAL_ERROR "Invalid ${use_case}_FILE_PATH. It should be a dir or file path.")
+    endif()
+else()
+    # Default is a directory path
+    set(PATH_TYPE PATH)
+endif()
+
+message(STATUS "${use_case}_FILE_PATH is of type: ${PATH_TYPE}")
+
+USER_OPTION(${use_case}_FILE_PATH "Directory with WAV files, or path to a single WAV file, to use in the evaluation application."
+    ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
+    ${PATH_TYPE})
+
+USER_OPTION(${use_case}_AUDIO_RATE "Specify the target sampling rate. Default is 16000."
+    16000
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_MONO "Specify if the audio needs to be converted to mono. Default is ON."
+    ON
+    BOOL)
+
+USER_OPTION(${use_case}_AUDIO_OFFSET "Specify the offset, in seconds, from which to start reading the audio. Default is 0."
+    0
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_DURATION "Specify the audio duration to load (in seconds). If set to 0, the entire audio will be processed."
+    0
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_RES_TYPE "Specify the re-sampling algorithm to use. Default is 'kaiser_best'."
+    kaiser_best
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_MIN_SAMPLES "Specify the minimum number of samples to use. Default is 16000; shorter audio will be automatically padded to this length."
+    16000
+    STRING)
+
+# Generate audio .cc files:
+generate_audio_code(${${use_case}_FILE_PATH} ${SRC_GEN_DIR} ${INC_GEN_DIR}
+        ${${use_case}_AUDIO_RATE}
+        ${${use_case}_AUDIO_MONO}
+        ${${use_case}_AUDIO_OFFSET}
+        ${${use_case}_AUDIO_DURATION}
+        ${${use_case}_AUDIO_RES_TYPE}
+        ${${use_case}_AUDIO_MIN_SAMPLES})
+
+# Generate kws labels file:
+USER_OPTION(${use_case}_LABELS_TXT_FILE_KWS "Labels' txt file for the chosen KWS model."
+    ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/labels/ds_cnn_labels.txt
+    FILEPATH)
+
+set(${use_case}_LABELS_CPP_FILE_KWS Labels_dscnn)
+generate_labels_code(
+    INPUT           "${${use_case}_LABELS_TXT_FILE_KWS}"
+    DESTINATION_SRC ${SRC_GEN_DIR}
+    DESTINATION_HDR ${INC_GEN_DIR}
+    OUTPUT_FILENAME "${${use_case}_LABELS_CPP_FILE_KWS}"
+    NAMESPACE       "arm" "app" "kws"
+)
+
+# Generate asr labels file:
+USER_OPTION(${use_case}_LABELS_TXT_FILE_ASR "Labels' txt file for the chosen ASR model."
+    ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/labels/labels_wav2letter.txt
+    FILEPATH)
+
+set(${use_case}_LABELS_CPP_FILE_ASR Labels_wav2letter)
+generate_labels_code(
+    INPUT           "${${use_case}_LABELS_TXT_FILE_ASR}"
+    DESTINATION_SRC ${SRC_GEN_DIR}
+    DESTINATION_HDR ${INC_GEN_DIR}
+    OUTPUT_FILENAME "${${use_case}_LABELS_CPP_FILE_ASR}"
+    NAMESPACE       "arm" "app" "asr"
+)
+
+USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen model"
+    0x00200000
+    STRING)
+
+USER_OPTION(${use_case}_MODEL_SCORE_THRESHOLD_KWS "Specify the score threshold [0.0, 1.0) that must be applied to the KWS results for a label to be deemed valid."
+    0.9
+    STRING)
+
+USER_OPTION(${use_case}_MODEL_SCORE_THRESHOLD_ASR "Specify the score threshold [0.0, 1.0) that must be applied to the ASR results for a label to be deemed valid."
+    0.5
+    STRING)
+
+# Download the default KWS model if no tflite file has been specified
+if (NOT DEFINED ${use_case}_MODEL_TFLITE_PATH_KWS)
+
+    set(SUB_USECASE_KWS             "kws")
+    set(MODEL_FILENAME_KWS          ds_cnn_clustered_int8.tflite)
+    set(MODEL_RESOURCES_DIR_KWS     ${DOWNLOAD_DEP_DIR}/${use_case})
+    file(MAKE_DIRECTORY             ${MODEL_RESOURCES_DIR_KWS})
+    set(DEFAULT_MODEL_PATH_KWS      ${MODEL_RESOURCES_DIR_KWS}/${MODEL_FILENAME_KWS})
+
+    # Download the default model
+    set(ZOO_COMMON_SUBPATH_KWS      "models/keyword_spotting/ds_cnn_large/tflite_clustered_int8")
+    set(ZOO_MODEL_SUBPATH_KWS       "${ZOO_COMMON_SUBPATH_KWS}/${MODEL_FILENAME_KWS}")
+
+    download_file_from_modelzoo(${ZOO_MODEL_SUBPATH_KWS}    ${DEFAULT_MODEL_PATH_KWS})
+
+    if (ETHOS_U55_ENABLED)
+        message(STATUS
+            "Ethos-U55 is enabled, but the model downloaded is not optimized by vela. "
+            "To use Ethos-U55 acceleration, optimise the downloaded model and pass it "
+            "as ${use_case}_MODEL_TFLITE_PATH_KWS to the CMake configuration.")
+    endif()
+
+    if (${TARGET_PLATFORM} STREQUAL native)
+
+        # Download test vectors
+        set(ZOO_TEST_IFM_SUBPATH_KWS    "${ZOO_COMMON_SUBPATH_KWS}/testing_input/input_2/0.npy")
+        set(ZOO_TEST_OFM_SUBPATH_KWS    "${ZOO_COMMON_SUBPATH_KWS}/testing_output/Identity/0.npy")
+
+        file(MAKE_DIRECTORY         ${MODEL_RESOURCES_DIR_KWS}/${SUB_USECASE_KWS})
+        set(${use_case}_TEST_IFM    ${MODEL_RESOURCES_DIR_KWS}/${SUB_USECASE_KWS}/ifm0.npy CACHE FILEPATH
+                                    "Input test vector for ${use_case}-${SUB_USECASE_KWS}")
+        set(${use_case}_TEST_OFM    ${MODEL_RESOURCES_DIR_KWS}/${SUB_USECASE_KWS}/ofm0.npy CACHE FILEPATH
+                                    "Input test vector for ${use_case}-${SUB_USECASE_KWS}.")
+
+        download_file_from_modelzoo(${ZOO_TEST_IFM_SUBPATH_KWS} ${${use_case}_TEST_IFM})
+        download_file_from_modelzoo(${ZOO_TEST_OFM_SUBPATH_KWS} ${${use_case}_TEST_OFM})
+        set(TEST_SRC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/src)
+        set(TEST_INC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/include)
+        file(MAKE_DIRECTORY ${TEST_SRC_GEN_DIR} ${TEST_INC_GEN_DIR})
+
+        generate_test_data_code(
+                            INPUT_DIR "${DOWNLOAD_DEP_DIR}/${use_case}/${SUB_USECASE_KWS}"
+                            DESTINATION_SRC ${TEST_SRC_GEN_DIR}
+                            DESTINATION_HDR ${TEST_INC_GEN_DIR}
+                            USECASE ${SUB_USECASE_KWS}
+                            NAMESPACE   "arm" "app" ${SUB_USECASE_KWS})
+    endif()
+
+else()
+    set(DEFAULT_MODEL_PATH_KWS  "N/A")
+endif()
+
+set(EXTRA_MODEL_CODE_KWS
+    "/* Model parameters for ${use_case} */"
+    "extern const uint32_t   g_NumMfcc = 10"
+    "extern const uint32_t   g_NumAudioWins = 49"
+    "extern const int        g_FrameLength = 640"
+    "extern const int        g_FrameStride = 320"
+    "extern const float      g_ScoreThreshold = ${${use_case}_MODEL_SCORE_THRESHOLD_KWS}"
+    )
+
+# Download the default ASR model if no tflite file has been specified
+if (NOT DEFINED ${use_case}_MODEL_TFLITE_PATH_ASR)
+
+    set(SUB_USECASE_ASR             "asr")
+    set(MODEL_FILENAME_ASR          wav2letter_int8.tflite)
+    set(MODEL_RESOURCES_DIR_ASR     ${DOWNLOAD_DEP_DIR}/${use_case})
+    file(MAKE_DIRECTORY             ${MODEL_RESOURCES_DIR_ASR})
+    set(DEFAULT_MODEL_PATH_ASR      ${MODEL_RESOURCES_DIR_ASR}/${MODEL_FILENAME_ASR})
+
+    # Download the default model
+    set(ZOO_COMMON_SUBPATH_ASR      "models/speech_recognition/wav2letter/tflite_int8")
+    set(ZOO_MODEL_SUBPATH_ASR       "${ZOO_COMMON_SUBPATH_ASR}/${MODEL_FILENAME_ASR}")
+
+    download_file_from_modelzoo(${ZOO_MODEL_SUBPATH_ASR}    ${DEFAULT_MODEL_PATH_ASR})
+
+    if (ETHOS_U55_ENABLED)
+        message(STATUS
+            "Ethos-U55 is enabled, but the model downloaded is not optimized by vela. "
+            "To use Ethos-U55 acceleration, optimise the downloaded model and pass it "
+            "as ${use_case}_MODEL_TFLITE_PATH to the CMake configuration.")
+    endif()
+
+    # If the target platform is native
+    if (${TARGET_PLATFORM} STREQUAL native)
+
+        # Download test vectors
+        set(ZOO_TEST_IFM_SUBPATH_ASR    "${ZOO_COMMON_SUBPATH_ASR}/testing_input/input_2_int8/0.npy")
+        set(ZOO_TEST_OFM_SUBPATH_ASR    "${ZOO_COMMON_SUBPATH_ASR}/testing_output/Identity_int8/0.npy")
+
+        file(MAKE_DIRECTORY             ${MODEL_RESOURCES_DIR_ASR}/${SUB_USECASE_ASR})
+        set(${use_case}_TEST_IFM_ASR    ${MODEL_RESOURCES_DIR_ASR}/${SUB_USECASE_ASR}/ifm0.npy CACHE FILEPATH
+                                    "Input test vector for ${use_case}-${SUB_USECASE_ASR}")
+        set(${use_case}_TEST_OFM_ASR    ${MODEL_RESOURCES_DIR_ASR}/${SUB_USECASE_ASR}/ofm0.npy CACHE FILEPATH
+                                    "Input test vector for ${use_case}-${SUB_USECASE_ASR}")
+
+        download_file_from_modelzoo(${ZOO_TEST_IFM_SUBPATH_ASR} ${${use_case}_TEST_IFM_ASR})
+        download_file_from_modelzoo(${ZOO_TEST_OFM_SUBPATH_ASR} ${${use_case}_TEST_OFM_ASR})
+
+        set(TEST_SRC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/src)
+        set(TEST_INC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/include)
+        file(MAKE_DIRECTORY ${TEST_SRC_GEN_DIR} ${TEST_INC_GEN_DIR})
+
+        # Generate test data files to be included in x86 tests
+        generate_test_data_code(
+                            INPUT_DIR "${DOWNLOAD_DEP_DIR}/${use_case}/${SUB_USECASE_ASR}"
+                            DESTINATION_SRC ${TEST_SRC_GEN_DIR}
+                            DESTINATION_HDR ${TEST_INC_GEN_DIR}
+                            USECASE ${SUB_USECASE_ASR}
+                            NAMESPACE   "arm" "app" ${SUB_USECASE_ASR})
+    endif()
+
+else()
+    set(DEFAULT_MODEL_PATH_ASR  "N/A")
+endif()
+
+set(EXTRA_MODEL_CODE_ASR
+    "/* Model parameters for ${use_case} */"
+    "extern const int   g_FrameLength    = 512"
+    "extern const int   g_FrameStride    = 160"
+    "extern const int   g_ctxLen         =  98"
+    "extern const float g_ScoreThreshold = ${${use_case}_MODEL_SCORE_THRESHOLD_ASR}"
+    )
+
+USER_OPTION(${use_case}_MODEL_TFLITE_PATH_KWS "NN model file to be used for KWS in the evaluation application. Model files must be in tflite format."
+    ${DEFAULT_MODEL_PATH_KWS}
+    FILEPATH
+    )
+
+USER_OPTION(${use_case}_MODEL_TFLITE_PATH_ASR "NN model file to be used for ASR in the evaluation application. Model files must be in tflite format."
+    ${DEFAULT_MODEL_PATH_ASR}
+    FILEPATH
+    )
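+
+# Both model paths can be overridden at configure time. For Ethos-U55 builds the
+# models should be vela-optimised first; the example paths below are placeholders:
+#   cmake .. -Dkws_asr_MODEL_TFLITE_PATH_KWS=<path/to/ds_cnn_vela.tflite> \
+#            -Dkws_asr_MODEL_TFLITE_PATH_ASR=<path/to/wav2letter_vela.tflite>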
+
+# Generate model file for KWS
+generate_tflite_code(
+    MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH_KWS}
+    DESTINATION ${SRC_GEN_DIR}
+    EXPRESSIONS ${EXTRA_MODEL_CODE_KWS}
+    NAMESPACE   "arm" "app" "kws"
+)
+
+# and for ASR
+generate_tflite_code(
+    MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH_ASR}
+    DESTINATION ${SRC_GEN_DIR}
+    EXPRESSIONS ${EXTRA_MODEL_CODE_ASR}
+    NAMESPACE   "arm" "app" "asr"
+)
diff --git a/tests/common/AppContextTest.cc b/tests/common/AppContextTest.cc
new file mode 100644
index 0000000..42b142d
--- /dev/null
+++ b/tests/common/AppContextTest.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AppContext.hpp"
+
+#include <catch.hpp>
+
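+/* ApplicationContext acts as a simple heterogeneous key/value store:
+ * Set<T>(name, value) stores the value (by copy, reference or pointer,
+ * depending on T) and Get<T>(name) retrieves it using the same type. */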
+TEST_CASE("Common: Application context")
+{
+    SECTION("Add primitive type Parameter")
+    {
+        arm::app::ApplicationContext context;
+        context.Set<uint32_t>("imgIndex", 0);
+        auto data = context.Get<uint32_t>("imgIndex");
+
+        REQUIRE(0 == data);
+
+    }
+
+    SECTION("Add object parameter")
+    {
+        arm::app::ApplicationContext context;
+        std::vector <std::string> vect{"a"};
+        context.Set<std::vector <std::string>>("vect", vect);
+        auto data = context.Get<std::vector <std::string>>("vect");
+
+        REQUIRE(vect == data);
+    }
+
+    SECTION("Add reference object parameter")
+    {
+        arm::app::ApplicationContext context;
+        std::vector <std::string> vect{"a"};
+        context.Set<std::vector <std::string>&>("vect", vect);
+        auto data = context.Get<std::vector <std::string>&>("vect");
+
+        REQUIRE(vect == data);
+    }
+
+    SECTION("Add object pointer parameter")
+    {
+        arm::app::ApplicationContext context;
+        std::vector <std::string>* vect = new std::vector <std::string>{"a"};
+        context.Set<std::vector <std::string>*>("vect", vect);
+        auto data = context.Get<std::vector <std::string>*>("vect");
+
+        REQUIRE(vect == data);
+        delete(vect);
+    }
+}
\ No newline at end of file
diff --git a/tests/common/ClassifierTests.cc b/tests/common/ClassifierTests.cc
new file mode 100644
index 0000000..f08a09a
--- /dev/null
+++ b/tests/common/ClassifierTests.cc
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Classifier.hpp"
+
+#include <catch.hpp>
+
+TEST_CASE("Common classifier")
+{
+    SECTION("Test invalid classifier")
+    {
+        TfLiteTensor* outputTens = nullptr;
+        std::vector <arm::app::ClassificationResult> resultVec;
+        arm::app::Classifier classifier;
+        REQUIRE(!classifier.GetClassificationResults(outputTens, resultVec, {}, 5));
+    }
+
+    SECTION("Test valid classifier UINT8")
+    {
+        const int dimArray[] = {1, 1001};
+        std::vector <std::string> labels(1001);
+        std::vector <uint8_t> outputVec(1001);
+        TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dimArray);
+        TfLiteTensor tfTensor = tflite::testing::CreateQuantizedTensor(
+                outputVec.data(), dims, 1, 0, "test");
+        TfLiteTensor* outputTensor = &tfTensor;
+        std::vector <arm::app::ClassificationResult> resultVec;
+        arm::app::Classifier classifier;
+        REQUIRE(classifier.GetClassificationResults(outputTensor, resultVec, labels, 5));
+        REQUIRE(5 == resultVec.size());
+    }
+
+    SECTION("Get classification results")
+    {
+        const int dimArray[] = {1, 1001};
+        std::vector <std::string> labels(1001);
+        std::vector<uint8_t> outputVec(1001, static_cast<uint8_t>(5));
+        TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dimArray);
+        TfLiteTensor tfTensor = tflite::testing::CreateQuantizedTensor(
+                outputVec.data(), dims, 1, 0, "test");
+        TfLiteTensor* outputTensor = &tfTensor;
+
+        std::vector <arm::app::ClassificationResult> resultVec;
+
+        /* Set the top five results. */
+        std::vector<std::pair<uint32_t, uint8_t>> selectedResults {
+                {0, 8}, {20, 7}, {10, 7}, {15, 9}, {1000, 10}};
+
+        for (size_t i = 0; i < selectedResults.size(); ++i) {
+            outputVec[selectedResults[i].first] = selectedResults[i].second;
+        }
+
+        arm::app::Classifier classifier;
+        REQUIRE(classifier.GetClassificationResults(outputTensor, resultVec, labels, 5));
+        REQUIRE(5 == resultVec.size());
+
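+        /* Results are expected in descending score order: 10, 9, 8, 7, 7
+         * (for the tied score of 7, index 20 is expected before index 10). */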
+        REQUIRE(resultVec[0].m_labelIdx == 1000);
+        REQUIRE(resultVec[1].m_labelIdx == 15);
+        REQUIRE(resultVec[2].m_labelIdx == 0);
+        REQUIRE(resultVec[3].m_labelIdx == 20);
+        REQUIRE(resultVec[4].m_labelIdx == 10);
+    }
+}
diff --git a/tests/common/ProfilerTests.cc b/tests/common/ProfilerTests.cc
new file mode 100644
index 0000000..caf492b
--- /dev/null
+++ b/tests/common/ProfilerTests.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Profiler.hpp"
+
+#include "AppContext.hpp"
+#include "TensorFlowLiteMicro.hpp"
+
+#include <catch.hpp>
+#include <iostream>
+
+
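+/* Expected Profiler lifecycle exercised below: StartProfiling() followed by
+ * StopProfiling(); Reset() (or GetResultsAndReset()) is required before the
+ * profiler can be started again. A profiler without a platform is unusable. */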
+TEST_CASE("Common: Test Profiler")
+{
+    hal_platform    platform;
+    data_acq_module data_acq {};
+    data_psn_module data_psn {};
+    platform_timer  timer {};
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &data_acq, &data_psn, &timer);
+    hal_platform_init(&platform);
+
+    /* An invalid profiler shouldn't be of much use. */
+    arm::app::Profiler profilerInvalid {nullptr, "test_invalid"};
+    REQUIRE(false == profilerInvalid.StartProfiling());
+    REQUIRE(false == profilerInvalid.StopProfiling());
+
+    arm::app::Profiler profilerValid{&platform, "test_valid"};
+    REQUIRE(true == profilerValid.StartProfiling());
+    REQUIRE(true == profilerValid.StopProfiling());
+
+    std::string strProfile = profilerValid.GetResultsAndReset();
+    REQUIRE(std::string::npos != strProfile.find("test_valid"));
+
+#if defined(CPU_PROFILE_ENABLED)
+    /* We should have milliseconds elapsed. */
+    REQUIRE(std::string::npos != strProfile.find("ms"));
+#endif /* defined(CPU_PROFILE_ENABLED) */
+
+    /* Abuse should fail: */
+    REQUIRE(false == profilerValid.StopProfiling());  /* We need to start it first. */
+    REQUIRE(true == profilerValid.StartProfiling());  /* Should be able to start it fine. */
+    REQUIRE(false == profilerValid.StartProfiling()); /* Can't restart it without resetting. */
+    profilerValid.Reset();                            /* Reset. */
+    REQUIRE(true == profilerValid.StartProfiling());  /* Can start it again now. */
+    REQUIRE(true == profilerValid.StopProfiling());   /* And stop it again. */
+}
diff --git a/tests/common/SlidingWindowTests.cc b/tests/common/SlidingWindowTests.cc
new file mode 100644
index 0000000..bfdb5b7
--- /dev/null
+++ b/tests/common/SlidingWindowTests.cc
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AudioUtils.hpp"
+#include "catch.hpp"
+
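+/* Expected SlidingWindow<T>(data, size, windowSize, stride) semantics exercised
+ * by these tests: Next() returns a pointer to the start of the next window and
+ * advances by `stride` elements; HasNext() is true only while a full window
+ * still fits within the remaining data. */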
+TEST_CASE("Common: Slide long data")
+{
+    std::vector<int> test{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+
+    SECTION("Fit the data")
+    {
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 1, 1);
+
+        for (int i = 0 ; i < 10; ++i) {
+            REQUIRE(slider.HasNext());
+            REQUIRE(*slider.Next() == i + 1);
+        }
+
+        REQUIRE(!slider.HasNext());
+        REQUIRE(nullptr == slider.Next());
+    }
+
+    SECTION("Fit the data stride> window")
+    {
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 2, 3);
+
+        REQUIRE(slider.HasNext());
+        REQUIRE(*slider.Next() == 1);
+
+        REQUIRE(slider.HasNext());
+        REQUIRE(*slider.Next() == 4);
+
+        REQUIRE(slider.HasNext());
+        REQUIRE(*slider.Next() == 7);
+
+        REQUIRE(!slider.HasNext());
+        REQUIRE(nullptr == slider.Next());
+    }
+
+    SECTION("Fit the data stride < window")
+    {
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 5, 1);
+
+        for (int i = 0 ; i < 6; i++) {
+            REQUIRE(slider.HasNext());
+            REQUIRE(*slider.Next() == i + 1);
+        }
+
+        REQUIRE(!slider.HasNext());
+        REQUIRE(nullptr == slider.Next());
+    }
+}
+
+
+TEST_CASE("Common: Slide data size 1")
+{
+    std::vector<int> test{1};
+
+    SECTION("Fit the data")
+    {
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 1, 1);
+
+        REQUIRE(slider.HasNext());
+        REQUIRE(*slider.Next() == 1);
+        REQUIRE(!slider.HasNext());
+        REQUIRE(nullptr == slider.Next());
+    }
+
+    SECTION("Does not Fit the data because of big window")
+    {
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 2, 1);
+
+        REQUIRE(!slider.HasNext());
+        REQUIRE(nullptr == slider.Next());
+    }
+
+    SECTION("Does not Fit the data because of big stride")
+    {
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 1, 2);
+
+        REQUIRE(slider.HasNext());
+        REQUIRE(*slider.Next() == 1);
+        REQUIRE(!slider.HasNext());
+        REQUIRE(nullptr == slider.Next());
+    }
+
+}
+
+
+TEST_CASE("Common: Slide reset")
+{
+    SECTION("current range")
+    {
+        std::vector<int> test{1};
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 1, 1);
+        int *saved = slider.Next();
+        slider.Reset();
+
+        REQUIRE(slider.Next() == saved);
+    }
+
+    SECTION("new range")
+    {
+        std::vector<int> test{1};
+        std::vector<int> test2{100};
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 1, 1);
+        slider.Next();
+        slider.Reset(test2.data());
+
+        REQUIRE(*slider.Next() == 100);
+    }
+}
+
+
+TEST_CASE("Common: Slide fast forward")
+{
+    std::vector<int> test{1, 2, 3, 4, 5};
+
+    auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 1, 1);
+    SECTION("at the beginning") {
+        slider.FastForward(3);
+
+        REQUIRE(slider.HasNext());
+        REQUIRE(*slider.Next() == 4);
+    }
+
+    SECTION("in the middle")
+    {
+        slider.Next();
+        slider.FastForward(3);
+
+        REQUIRE(slider.HasNext());
+        REQUIRE(*slider.Next() == 4);
+    }
+
+    SECTION("at the end")
+    {
+        while(slider.HasNext()) {
+            slider.Next();
+        }
+        slider.FastForward(3);
+
+        REQUIRE(slider.HasNext());
+        REQUIRE(*slider.Next() == 4);
+    }
+
+    SECTION("out of the range")
+    {
+        slider.FastForward(100);
+
+        REQUIRE(!slider.HasNext());
+        REQUIRE(slider.Next() == nullptr);
+    }
+}
+
+
+TEST_CASE("Common: Slide Index")
+{
+    std::vector<int> test{1, 2, 3, 4, 5};
+    auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 1, 1);
+    REQUIRE(slider.Index() == 0);
+    for (int i = 0; i < 5; i++) {
+        slider.Next();
+        REQUIRE(slider.Index() == i);
+    }
+}
+
+
+TEST_CASE("Common: Total strides") 
+{
+    std::vector<int> test{1, 2, 3, 4, 5};
+
+    SECTION("Element by element")
+    {
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 1, 1);
+        REQUIRE(slider.TotalStrides() == 4 );
+    }
+
+    SECTION("Step through element")
+    {
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 1, 2);
+        REQUIRE(slider.TotalStrides() == 2 );
+    }
+
+    SECTION("Window = data")
+    {
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 5, 2);
+        REQUIRE(slider.TotalStrides() == 0 );
+    }
+
+    SECTION("Window > data")
+    {
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 6, 2);
+        REQUIRE(slider.TotalStrides() == 0 );
+    }
+
+    SECTION("Window < data, not enough for the next stride")
+    {
+        auto slider = arm::app::audio::SlidingWindow<int>(test.data(), test.size(), 4, 2);
+        REQUIRE(slider.TotalStrides() == 0 );
+    }
+}
+
+
+TEST_CASE("Common: Next window data index")
+{
+    std::vector<int> test{1, 2, 3, 4, 5};
+
+    /* Check we get the correct index returned */
+    SECTION("Stride 1")
+    {
+        auto slider = arm::app::audio::ASRSlidingWindow<int>(test.data(), test.size(), 1, 1);
+        REQUIRE(slider.NextWindowStartIndex() == 0);
+        slider.Next();
+        REQUIRE(slider.NextWindowStartIndex() == 1);
+        slider.Next();
+        REQUIRE(slider.NextWindowStartIndex() == 2);
+        slider.Next();
+        REQUIRE(slider.NextWindowStartIndex() == 3);
+        slider.Next();
+        REQUIRE(slider.NextWindowStartIndex() == 4);
+        slider.Next();
+        REQUIRE(slider.NextWindowStartIndex() == 5);
+        slider.Next();
+        REQUIRE(slider.NextWindowStartIndex() == 5);
+    }
+
+    SECTION("Stride 2")
+    {
+        auto slider = arm::app::audio::ASRSlidingWindow<int>(test.data(), test.size(), 1, 2);
+        REQUIRE(slider.NextWindowStartIndex() == 0);
+        slider.Next();
+        REQUIRE(slider.NextWindowStartIndex() == 2);
+        REQUIRE(slider.NextWindowStartIndex() == 2);
+        slider.Next();
+        REQUIRE(slider.NextWindowStartIndex() == 4);
+    }
+
+    SECTION("Stride 3")
+    {
+        auto slider = arm::app::audio::ASRSlidingWindow<int>(test.data(), test.size(), 1, 3);
+        REQUIRE(slider.NextWindowStartIndex() == 0);
+        slider.Next();
+        REQUIRE(slider.NextWindowStartIndex() == 3);
+        REQUIRE(slider.NextWindowStartIndex() == 3);
+        slider.Next();
+        REQUIRE(slider.NextWindowStartIndex() == 6);
+        REQUIRE(!slider.HasNext());
+        REQUIRE(slider.Next() == nullptr);
+        REQUIRE(slider.NextWindowStartIndex() == 6);
+    }
+}
diff --git a/tests/resources/golden_fv/AdGoldenInput.hpp b/tests/resources/golden_fv/AdGoldenInput.hpp
new file mode 100644
index 0000000..41d1a89
--- /dev/null
+++ b/tests/resources/golden_fv/AdGoldenInput.hpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef AD_GOLDEN_OUTPUTS_HPP
+#define AD_GOLDEN_OUTPUTS_HPP
+
+#include "Model.hpp"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#ifndef AD_FEATURE_VEC_DATA_SIZE
+#define AD_IN_FEATURE_VEC_DATA_SIZE       (1024)
+#define AD_OUT_FEATURE_VEC_DATA_SIZE       (8)
+#endif /* AD_FEATURE_VEC_DATA_SIZE */
+
+/* Golden input. */
+uint8_t ad_golden_input[AD_IN_FEATURE_VEC_DATA_SIZE] = {
+  0x7a, 0x7c, 0x7f, 0x7c, 0x7b, 0x61, 0x7f, 0x75, 0x7f, 0x6b, 0x6d, 0x71,
+  0x7f, 0x6e, 0x7a, 0x76, 0x7f, 0x7f, 0x65, 0x7f, 0x7f, 0x6f, 0x5d, 0x67,
+  0x7f, 0x5b, 0x7a, 0x43, 0x65, 0x69, 0x4e, 0x63, 0x5f, 0x56, 0x4e, 0x64,
+  0x36, 0x4a, 0x3c, 0x56, 0x3a, 0x61, 0x71, 0x74, 0x72, 0x6f, 0x71, 0x71,
+  0x66, 0x7b, 0x68, 0x62, 0x44, 0x5f, 0x2f, 0x3d, 0x56, 0x5b, 0x5b, 0x4d,
+  0x5f, 0x59, 0x58, 0x6e, 0x4f, 0x3d, 0x48, 0x5f, 0x46, 0x38, 0x48, 0x4f,
+  0x46, 0x74, 0x6d, 0x61, 0x60, 0x63, 0x6a, 0x68, 0x62, 0x69, 0x63, 0x5c,
+  0x6e, 0x4d, 0x4c, 0x3c, 0x5a, 0x55, 0x4a, 0x5d, 0x59, 0x66, 0x67, 0x6c,
+  0x55, 0x40, 0x49, 0x47, 0x56, 0x61, 0x48, 0x4d, 0x64, 0x61, 0x77, 0x7e,
+  0x37, 0x63, 0x52, 0x3e, 0x57, 0x4d, 0x54, 0x4f, 0x3b, 0x4c, 0x54, 0x50,
+  0x55, 0x4c, 0x57, 0x58, 0x45, 0x7b, 0x74, 0x58, 0x39, 0x34, 0x3c, 0x38,
+  0x3e, 0x1e, 0x22, 0x46, 0x2c, 0x7a, 0x6e, 0x74, 0x72, 0x70, 0x61, 0x7d,
+  0x69, 0x79, 0x66, 0x3c, 0x33, 0x20, 0x3a, 0x20, 0x34, 0x29, 0x3c, 0x4e,
+  0x47, 0x5b, 0x78, 0x47, 0x36, 0x30, 0x18, 0x45, 0x24, 0x25, 0x2c, 0x75,
+  0x71, 0x70, 0x7f, 0x73, 0x6c, 0x6c, 0x77, 0x6a, 0x6d, 0x6b, 0x6d, 0x5b,
+  0x36, 0x43, 0x34, 0x2b, 0x2e, 0x3b, 0x32, 0x67, 0x2a, 0x5b, 0x7f, 0x66,
+  0x32, 0x24, 0x28, 0x29, 0x2d, 0x31, 0x2b, 0x1d, 0x0d, 0x40, 0x3a, 0x49,
+  0x3b, 0x42, 0x37, 0x43, 0x40, 0x44, 0x4f, 0x40, 0x25, 0x1b, 0x33, 0x32,
+  0x15, 0x3b, 0x22, 0x33, 0x45, 0x4e, 0x45, 0x3a, 0x2a, 0x10, 0x19, 0x3a,
+  0x27, 0x25, 0x18, 0x0d, 0x0d, 0x3a, 0x36, 0x2d, 0x21, 0x34, 0x25, 0x38,
+  0x26, 0x33, 0x2f, 0x33, 0x2a, 0x21, 0x36, 0x1e, 0x2b, 0x28, 0x29, 0x31,
+  0x25, 0x2e, 0x4d, 0x32, 0x42, 0x1b, 0x37, 0x1a, 0x34, 0x36, 0x23, 0x25,
+  0x2b, 0x3f, 0x46, 0x41, 0x4c, 0x35, 0x39, 0x2d, 0x37, 0x4d, 0x42, 0x46,
+  0x27, 0x36, 0x20, 0x34, 0x10, 0x32, 0x19, 0x21, 0x33, 0x3b, 0x39, 0x46,
+  0x10, 0xf3, 0x26, 0x13, 0x24, 0x1f, 0x28, 0x18, 0x29, 0x20, 0x20, 0x14,
+  0x1f, 0x17, 0x27, 0x34, 0x38, 0x3e, 0x26, 0x37, 0x20, 0x33, 0x16, 0x26,
+  0x0e, 0x1c, 0x31, 0x1d, 0x23, 0x2e, 0x44, 0x38, 0x25, 0x2f, 0x27, 0x39,
+  0x3b, 0x2b, 0x36, 0x3c, 0x35, 0x3f, 0x2d, 0x3f, 0x42, 0x32, 0x43, 0x4e,
+  0x55, 0x3d, 0x17, 0x2e, 0x39, 0x20, 0x31, 0x41, 0x38, 0x2b, 0x31, 0x38,
+  0x46, 0x3f, 0x1e, 0x2d, 0x0a, 0x07, 0x18, 0xfc, 0x1a, 0x14, 0xec, 0x25,
+  0x1e, 0x30, 0x3c, 0x37, 0x2a, 0x47, 0x3c, 0x3b, 0x3b, 0x37, 0x2b, 0x22,
+  0x18, 0x1b, 0x31, 0x1e, 0x0e, 0x22, 0x13, 0x11, 0x10, 0x10, 0x14, 0x22,
+  0x0c, 0x0e, 0x18, 0x03, 0x0e, 0x03, 0x03, 0x08, 0x26, 0x0c, 0x1a, 0x0a,
+  0x06, 0x2d, 0x1e, 0x1e, 0x14, 0x1a, 0x26, 0x1d, 0x06, 0x06, 0x1c, 0x0e,
+  0x16, 0x06, 0x09, 0x13, 0x1c, 0x21, 0x25, 0x0d, 0x1c, 0x00, 0xfa, 0xfd,
+  0x13, 0xfa, 0x18, 0xff, 0x0d, 0x08, 0x23, 0x2e, 0x26, 0x1d, 0x14, 0x10,
+  0x14, 0x28, 0x14, 0x1b, 0x05, 0x0a, 0xff, 0xfa, 0x1a, 0x18, 0x11, 0x13,
+  0x16, 0x0f, 0x07, 0x12, 0x05, 0x00, 0x13, 0xf3, 0x05, 0x01, 0x01, 0x01,
+  0x1a, 0x16, 0x2b, 0x26, 0x1e, 0x21, 0x15, 0x13, 0x12, 0x28, 0x19, 0x1c,
+  0xff, 0xf8, 0xf0, 0x02, 0x01, 0x05, 0x04, 0x19, 0x11, 0x14, 0x1b, 0x15,
+  0xea, 0x19, 0x12, 0x02, 0x0f, 0x11, 0xee, 0xfe, 0x1c, 0x2f, 0x19, 0x38,
+  0x2a, 0x30, 0x26, 0x1e, 0x28, 0x29, 0x24, 0x12, 0x01, 0x12, 0xff, 0x0c,
+  0x09, 0x0b, 0x26, 0x24, 0x21, 0x24, 0x11, 0x21, 0x13, 0x17, 0xf8, 0x0c,
+  0xfd, 0x03, 0xf9, 0xfc, 0x03, 0x20, 0x14, 0x34, 0x1e, 0x30, 0x2e, 0x25,
+  0x23, 0x29, 0x16, 0x01, 0xf4, 0x05, 0x15, 0x05, 0xf9, 0x02, 0xea, 0x32,
+  0x38, 0x28, 0x28, 0x18, 0xf6, 0xfa, 0xfb, 0xff, 0xf9, 0xee, 0xff, 0x01,
+  0xfc, 0x41, 0x40, 0x2b, 0x2f, 0x1f, 0x1c, 0x20, 0x21, 0x28, 0x2b, 0x17,
+  0xfd, 0xf9, 0x07, 0xf5, 0x01, 0xf8, 0xfe, 0xfe, 0x11, 0x14, 0x27, 0x0e,
+  0xf5, 0xfd, 0xe7, 0x05, 0xef, 0xee, 0xe9, 0xf6, 0xff, 0x10, 0x0c, 0x16,
+  0x15, 0x16, 0x0d, 0x1c, 0x15, 0x1c, 0x09, 0xf6, 0x00, 0xfe, 0xf4, 0xf8,
+  0xe1, 0x0a, 0xfa, 0x05, 0x0b, 0x1b, 0x19, 0x05, 0xf0, 0xec, 0xec, 0x02,
+  0xf0, 0xec, 0xf4, 0xec, 0xe7, 0x30, 0x47, 0x2d, 0x31, 0x2a, 0x24, 0x31,
+  0x1f, 0x31, 0x2c, 0x07, 0xe4, 0xfa, 0xef, 0xee, 0xfe, 0xf5, 0xf1, 0x1a,
+  0x18, 0x33, 0x1c, 0x10, 0xf5, 0xce, 0xeb, 0xe3, 0xf1, 0xec, 0xe7, 0xed,
+  0xde, 0x19, 0x2e, 0x11, 0x1f, 0x17, 0x19, 0x23, 0x17, 0x1d, 0x1b, 0x01,
+  0xef, 0xed, 0xeb, 0xf8, 0xe9, 0xf0, 0xf3, 0x1c, 0x19, 0x2a, 0x0d, 0x19,
+  0xe3, 0xe1, 0xee, 0xe0, 0xe7, 0xee, 0xe7, 0xee, 0xea, 0x30, 0x2e, 0x2f,
+  0x32, 0x11, 0x29, 0x38, 0x0f, 0x1a, 0x29, 0x24, 0xe6, 0xfa, 0xfc, 0xf0,
+  0xed, 0xec, 0xe1, 0x2e, 0x13, 0x29, 0x22, 0x16, 0xd4, 0xee, 0xeb, 0xee,
+  0xd3, 0xe4, 0xe4, 0xe5, 0xee, 0x23, 0x08, 0x10, 0x15, 0x17, 0x1b, 0x2b,
+  0x1b, 0x1d, 0x17, 0x03, 0xf8, 0xdd, 0xe3, 0xe8, 0xe5, 0xd8, 0xdb, 0x09,
+  0xf5, 0x10, 0x16, 0x14, 0xd9, 0xde, 0xd0, 0xd7, 0xd7, 0xda, 0xe0, 0xe1,
+  0xe4, 0x0f, 0x17, 0x04, 0x10, 0x0c, 0x14, 0x1d, 0x1d, 0x17, 0x15, 0xfa,
+  0xd9, 0xd6, 0xe1, 0xdf, 0xd7, 0xdf, 0xd5, 0x18, 0xf5, 0x14, 0x08, 0x0b,
+  0xd0, 0xd6, 0xd4, 0xd7, 0xc6, 0xda, 0xc5, 0xda, 0xdc, 0x14, 0x26, 0x15,
+  0x19, 0x0d, 0x0b, 0x0f, 0x07, 0x09, 0x1b, 0x03, 0xdc, 0xda, 0xd6, 0xd8,
+  0xc0, 0xc5, 0xdc, 0x08, 0xfb, 0x10, 0x0d, 0x13, 0xd8, 0xde, 0xcb, 0xd6,
+  0xd0, 0xc7, 0xcd, 0xdc, 0xc3, 0x0c, 0x0f, 0x08, 0x13, 0x13, 0x25, 0x25,
+  0x13, 0x1f, 0x11, 0xee, 0xd0, 0xd0, 0xd2, 0xcd, 0xbf, 0xca, 0xd2, 0x0a,
+  0xeb, 0x04, 0x03, 0x10, 0xd0, 0xcd, 0xb1, 0xc7, 0xca, 0xbf, 0xe1, 0xdd,
+  0xce, 0x2b, 0x19, 0x20, 0x20, 0x14, 0x24, 0x34, 0x27, 0x2c, 0x25, 0xf8,
+  0xd5, 0xc7, 0xcb, 0xba, 0xc0, 0xcf, 0xc4, 0x14, 0x06, 0x1f, 0x28, 0x10,
+  0xc9, 0xc9, 0xc3, 0xbf, 0xc4, 0xcd, 0xd2, 0xcc, 0xd4, 0x15, 0x27, 0x1e,
+  0x19, 0x05, 0x18, 0x1d, 0x24, 0x1a, 0x1f, 0xf9, 0xcc, 0xd3, 0xc3, 0xcd,
+  0xcc, 0xc1, 0xd5, 0x10, 0xe7, 0x1d, 0x12, 0x04, 0xb4, 0xb9, 0xb0, 0xb9,
+  0xb1, 0xb1, 0xbb, 0xd2, 0xb0, 0xff, 0x22, 0x06, 0xff, 0xf7, 0xfb, 0x07,
+  0xf8, 0xfa, 0x03, 0xef, 0xc2, 0xc7, 0xb9, 0xbb, 0xb4, 0xb2, 0xb5, 0xf2,
+  0xcc, 0x05, 0xf1, 0xea, 0xb0, 0xb1, 0xa7, 0xab, 0xa9, 0xa6, 0xb2, 0xbf,
+  0xb4, 0x04, 0x0f, 0x02, 0x0e, 0x0a, 0x05, 0x07, 0xff, 0x04, 0x0a, 0xe2,
+  0xac, 0xad, 0xa9, 0xa0, 0xa8, 0xa3, 0x9a, 0xef, 0xd3, 0xf5, 0x03, 0xfd,
+  0x9c, 0x99, 0x99, 0x9c, 0x96, 0x99, 0xaf, 0xa8, 0xaa, 0xfd, 0x09, 0x0e,
+  0xf5, 0xed, 0xe7, 0xf4, 0xec, 0xf4, 0xf1, 0xd5, 0xa5, 0x9a, 0x9e, 0x98,
+  0xa7, 0x99, 0xa2, 0xd9, 0xc2, 0xf1, 0xee, 0xec, 0x8a, 0x86, 0x8c, 0x8d,
+  0x84, 0x88, 0xa0, 0xad, 0xa2, 0xe5, 0x02, 0xff, 0xf5, 0xe4, 0xee, 0xf1,
+  0xf2, 0xec, 0xf0, 0xc9, 0x9e, 0x97, 0x98, 0x89, 0x8f, 0x88, 0x93, 0xd9,
+  0xd1, 0xf8, 0xeb, 0xde
+};
+
+/* Golden output */
+uint8_t ad_golden_out[AD_OUT_FEATURE_VEC_DATA_SIZE] = {
+        0x3a, 0x7f, 0x36, 0x76, 0x80, 0x80, 0x80, 0x80
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AD_GOLDEN_OUTPUTS_HPP */
diff --git a/tests/resources/golden_fv/AsrGoldenFeatures.hpp b/tests/resources/golden_fv/AsrGoldenFeatures.hpp
new file mode 100644
index 0000000..a230a52
--- /dev/null
+++ b/tests/resources/golden_fv/AsrGoldenFeatures.hpp
@@ -0,0 +1,931 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ASR_F_GOLDEN_HPP
+#define ASR_F_GOLDEN_HPP
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+#include <stddef.h>
+#include <stdint.h>
+
+size_t golden_asr_mfcc_len = 13U * 296U;
+size_t golden_diff1_len = golden_asr_mfcc_len;
+size_t golden_diff2_len = golden_asr_mfcc_len;
+
+static const float golden_asr_mfcc[] = {
+/* 13 features, 296 time samples */
+-739.77203,    4.43339,    4.14916,    3.94062,    3.75471,    3.36722,    2.75536,    2.15424,    1.70661,    1.26036,    0.63381,   -0.07275,   -0.61404,
+-741.22699,    2.43739,    2.19547,    1.81668,    1.33471,    0.79128,    0.23156,   -0.30073,   -0.76779,   -1.14135,   -1.40496,   -1.55474,   -1.59852,
+-740.28369,    3.77706,    3.55944,    3.23880,    2.85630,    2.43850,    1.98858,    1.49232,    0.93505,    0.31964,   -0.32507,   -0.95050,   -1.50613,
+-737.92542,    7.08773,    6.79751,    6.36016,    5.82753,    5.24427,    4.63403,    3.99524,    3.30806,    2.54938,    1.70949,    0.80348,   -0.12769,
+-737.88190,    7.17392,    6.94397,    6.56755,    6.05465,    5.41886,    4.67691,    3.84815,    2.95400,    2.01727,    1.06150,    0.11023,   -0.81362,
+-740.40613,    3.61488,    3.41837,    3.10070,    2.67607,    2.16330,    1.58483,    0.96553,    0.33144,   -0.29155,   -0.87899,   -1.40899,   -1.86323,
+-738.04297,    6.88258,    6.47138,    5.82175,    4.98088,    4.00386,    2.94619,    1.85797,    0.78088,   -0.25189,   -1.21348,   -2.08038,   -2.83026,
+-739.10394,    5.01142,    4.08197,    3.61993,    3.46862,    2.79344,    1.53009,    0.59595,    0.51155,    0.61026,    0.09230,   -0.73804,   -0.99980,
+-738.93628,    5.04577,    3.92978,    3.86487,    4.45355,    4.15855,    2.83126,    1.97671,    2.32735,    2.63497,    1.63883,    0.06747,   -0.52301,
+-742.09735,    1.27576,    1.23579,    1.17278,    1.08758,    0.97981,    0.85158,    0.70800,    0.55358,    0.39015,    0.21937,    0.04588,   -0.12424,
+-740.26038,    3.81065,    3.58432,    3.22149,    2.74267,    2.17462,    1.54851,    0.89788,    0.25634,   -0.34453,   -0.87725,   -1.32006,   -1.65818,
+-738.87238,    5.63912,    5.15087,    4.61127,    4.02923,    3.28429,    2.43234,    1.70882,    1.20934,    0.76495,    0.22197,   -0.31741,   -0.67138,
+-738.80823,    5.85002,    5.58049,    5.14473,    4.56218,    3.85858,    3.06471,    2.21470,    1.34432,    0.48918,   -0.31712,   -1.04458,   -1.66828,
+-737.43085,    7.76511,    7.40725,    6.84653,    6.11416,    5.24155,    4.27151,    3.25613,    2.23841,    1.24139,    0.27895,   -0.63012,   -1.46581,
+-737.83099,    7.12094,    6.58481,    5.70745,    4.61185,    3.33776,    2.02636,    0.73143,   -0.42896,   -1.41877,   -2.17289,   -2.68819,   -2.96023,
+-738.05359,    6.74544,    6.00377,    4.91264,    3.64911,    2.39284,    1.28308,    0.39052,   -0.28846,   -0.81449,   -1.27167,   -1.72931,   -2.21198,
+-733.09210,   11.21165,    6.83860,    6.50434,    8.39739,    6.62222,    1.12450,   -2.07976,   -0.50446,    1.11805,   -1.27790,   -4.70090,   -4.26429,
+-729.61566,   10.02992,   -1.95083,    1.63069,   11.62741,    8.21267,   -4.95377,   -8.36486,    0.19372,    4.14391,   -3.06428,   -8.99785,   -4.47088,
+-723.47961,   10.17970,   -9.74782,   -1.55895,   11.08994,    8.48012,   -0.16512,   -6.58712,   -3.57666,    7.86839,    7.80519,   -4.68555,   -5.37104,
+-703.88519,   18.29580,  -21.44657,   -6.79528,   12.08492,    8.75593,    2.90998,   -1.51398,   -1.36442,    6.59666,    1.81764,   -9.89391,   -4.18426,
+-678.76422,   19.09791,  -33.17283,    1.54777,   14.24737,    8.83076,    2.65867,   -3.69481,   -3.60083,   -0.10980,    0.55969,   -1.57821,   -3.58482,
+-597.23285,   37.85891,  -60.26614,   -0.12251,   14.22423,   -0.90098,    0.90394,    0.41278,   -4.10125,    6.44201,    6.07266,   11.16191,   -6.44330,
+-487.76270,   29.97835,  -62.39044,   -5.40837,   11.29997,    6.95592,   -5.83144,    0.48654,  -11.59783,   10.14861,    7.22627,   12.45544,  -14.93312,
+-468.18643,   32.67844,  -46.44635,   -7.10132,    3.99336,   12.08127,   -4.66342,    4.15338,  -17.32649,   -4.82752,  -10.19122,   -2.54350,  -13.77276,
+-469.04898,   37.73291,  -47.50086,    5.52127,    3.27568,    6.41865,  -13.02156,   -3.75707,   -4.57083,    0.00712,  -10.25383,  -10.10860,   -8.79650,
+-468.63696,   42.29890,  -48.59408,    5.51896,   12.85027,    3.59784,  -19.76094,   -5.88601,   -5.34028,    3.93530,   -8.92158,  -14.67978,  -14.06335,
+-484.16913,   47.05151,  -50.70958,   -3.16510,    6.97707,   11.57512,   -4.71207,   10.11598,    7.59590,   11.88781,   -5.98053,   -3.78069,   -1.87512,
+-513.99847,   38.55527,  -58.36864,   -3.13968,    5.45301,   10.91985,   -3.15140,   -0.44534,    0.41920,   15.86110,    3.14245,   -0.24400,  -10.29185,
+-525.34448,   53.88523,  -61.43047,   -6.67531,   16.82977,   18.57971,    1.81502,    6.59831,    3.63511,   16.62849,   -6.21790,   -0.93611,  -10.05373,
+-543.47412,   67.34111,  -53.03713,  -11.67502,   16.99445,   17.41154,   10.10119,    9.28551,   -2.13365,   15.32820,   -4.74680,    3.89862,    1.69543,
+-543.86810,   95.82130,  -44.91877,  -27.12481,    7.24652,    9.93297,    2.24394,    8.90679,    3.20333,    9.80860,  -13.70252,   -9.32810,   -1.20880,
+-540.39093,  118.15536,  -39.92270,  -31.31052,    8.73977,   20.39213,    2.70147,   -6.48235,  -11.14444,    2.80034,   -5.70351,   -9.33047,   -3.82105,
+-559.33344,  113.90231,  -45.61569,  -36.66971,   11.51530,   18.66145,    5.43105,   -5.79920,  -15.95238,   11.71870,    3.30188,  -17.29617,   -0.12055,
+-611.22729,   92.45084,  -33.29684,  -45.60332,   -0.57918,   19.67272,   18.25746,    2.54525,   -1.95916,   10.47044,    0.64767,  -23.26884,  -10.61353,
+-673.00159,   63.06639,    4.17435,  -13.07201,   15.45242,   36.55398,   28.04820,    6.53190,   -2.03077,   -4.86843,  -10.15063,   -7.98050,   11.46151,
+-685.77118,   60.38826,   23.26720,    5.49815,   14.32310,   24.30405,   22.31143,   14.91874,   12.03119,    8.62008,    2.74104,    0.13998,    4.97190,
+-614.80969,  123.72738,   24.36467,  -23.45894,   -9.78909,   12.62901,   13.46521,    0.35274,   -5.22431,    3.50167,    1.96512,  -11.65986,  -19.05206,
+-570.55817,  138.50626,   23.04182,  -24.43203,   -6.62432,    8.97017,   -1.10296,   -6.29671,  -15.93489,    5.53342,   -1.25716,  -13.43966,  -28.57895,
+-445.96667,  164.02409,    5.72825,  -52.08717,  -16.37173,   10.28557,   10.33937,   -2.31468,  -20.52966,   15.33060,   -3.55347,  -16.33257,  -28.28063,
+-363.43756,  175.12576,   -3.31680,  -66.65984,  -19.85510,    7.41258,   11.93293,   -0.79320,  -16.91802,   20.56325,   -0.02419,  -12.53419,  -39.70355,
+-348.60983,  154.19067,    7.74570,  -50.03997,  -22.13355,    9.42086,    9.85655,    9.36244,   -5.75583,   21.20298,   -6.69417,   -8.00366,  -43.66351,
+-299.13873,  152.12788,  -11.62845,  -42.77829,  -31.78709,   -0.61493,   -5.46732,    9.17835,   -5.86130,    9.14817,   -5.92584,   -9.86393,  -49.09785,
+-279.24234,  134.64542,  -43.43766,  -36.45545,  -21.69882,  -16.28008,   -5.64642,   13.52591,   -1.48913,   11.07259,  -13.52461,  -16.82686,  -39.25970,
+-260.71298,  136.22865,  -44.57975,  -29.85570,   -0.64486,  -38.38213,  -18.33903,    5.89145,   -3.89952,   12.23704,   -3.22323,  -23.71099,  -20.92175,
+-244.48389,  133.28445,  -43.79402,  -22.31293,    4.11721,  -55.92643,  -25.76093,   17.25046,   -9.47253,    0.42447,   -5.67624,  -16.34599,   -8.27315,
+-258.41626,  114.99637,  -39.90332,    1.50976,    8.22684,  -57.25970,  -31.64858,    1.77440,  -21.77207,   -4.49708,   -6.35045,   -1.23350,   -9.94049,
+-293.08508,   99.34319,  -16.11116,   17.69371,   15.47070,  -62.74282,  -27.08352,    4.12698,  -20.45372,  -10.08534,  -17.34487,   -5.33286,  -20.24701,
+-322.00287,   94.11205,   -0.92652,   37.83418,   21.11624,  -52.57660,  -18.44728,    4.90179,  -30.57641,  -17.70811,  -27.24849,  -12.71308,  -22.68225,
+-360.38376,   87.69393,   15.29972,   68.40858,   17.43580,  -39.63292,   -1.59294,    1.32824,  -37.38486,  -21.12629,  -25.94076,  -13.80269,  -27.75978,
+-401.70505,   90.96419,   29.30173,   78.28263,    3.20327,  -36.78372,    6.16998,   -7.54714,  -28.99337,  -21.18590,  -14.36380,    4.06708,  -33.08428,
+-533.28723,  137.39876,   49.16436,   72.20885,    7.80102,  -24.72589,   -3.93477,  -15.49783,  -23.35331,  -18.65767,  -14.67795,   -4.94262,  -18.59274,
+-559.62939,   99.69369,   62.95110,   83.08356,    7.80114,   13.37880,  -17.57888,   -7.87949,  -14.74117,  -25.68275,  -16.59367,  -13.87330,   -6.59394,
+-555.69965,   67.18806,   52.50816,   79.44274,    2.79971,   21.60576,  -20.30817,  -11.56905,  -17.36839,  -27.29060,  -18.14963,  -12.88813,  -10.80527,
+-659.99139,   90.88692,   74.82683,   53.60613,   17.83161,    7.39822,   -9.09811,  -16.35573,  -20.83104,  -22.41822,  -18.70988,  -18.33362,  -14.70294,
+-679.79871,   83.09547,   68.24111,   49.89501,   30.46873,   14.45928,    3.91992,   -3.76449,   -8.50756,   -9.65265,  -10.31525,  -11.33202,  -11.55049,
+-694.87091,   62.41616,   49.22509,   35.81430,   26.22110,   19.58934,   13.44967,    6.70528,   -0.06228,   -5.94130,  -10.83276,  -15.16693,  -18.84751,
+-704.53339,   49.78053,   40.19272,   32.00471,   26.16905,   21.18073,   16.54406,   11.00089,    3.29591,   -4.95220,  -11.12703,  -14.73515,  -16.49860,
+-704.69830,   50.06087,   41.28493,   33.35400,   26.99703,   20.23875,   13.06462,    6.38402,   -0.09129,   -5.92407,   -9.17170,   -9.74002,  -10.29155,
+-711.46802,   42.55208,   37.13642,   29.97435,   22.53142,   15.76887,   10.13004,    5.37198,    0.79322,   -3.90029,   -8.15694,  -11.21932,  -12.86519,
+-723.13409,   27.21255,   24.66596,   20.84755,   16.27658,   11.48368,    6.89734,    2.77970,   -0.77472,   -3.78793,   -6.32338,   -8.42078,  -10.06509,
+-443.93698,   -1.74865,   -9.79181,    7.66294,  -18.64954,   -5.27232,    4.84549,   -8.48673,   -2.54492,    2.67439,   -6.86648,    9.85871,   -8.21330,
+-298.20532,  -12.81320,   17.29125,    6.56532,  -15.89673,   -1.43100,    8.31953,  -10.98549,    9.37294,    0.49440,   -3.70998,    7.79895,   -6.09313,
+-305.63287,  -56.60079,   37.23157,   15.93787,  -16.12873,    4.72155,    3.36984,   -4.53316,   12.89923,   -6.25745,   -5.42213,    9.98214,   -5.26571,
+-316.28519,  -90.52562,   34.93086,   42.75752,    6.76751,    3.31615,   -5.30959,    6.70043,   21.56020,  -10.07466,   -6.29718,   21.09662,    2.53661,
+-329.53720,  -94.12029,    6.18531,   40.50994,    4.35451,  -12.12563,  -13.27101,    7.24133,   15.15652,  -10.72804,   -3.78922,   17.08343,    4.88286,
+-356.77383,  -52.07880,  -15.00291,   42.41784,   -2.45872,  -20.72002,  -19.36626,    7.91365,    4.72626,   -4.90424,    3.93229,   13.72323,    3.81743,
+-388.43564,  -18.23531,  -27.64051,   39.90910,    6.87263,   -9.67537,  -18.85973,    6.46791,   10.94759,   -4.51365,    8.38604,   13.82587,   -0.89720,
+-387.53278,   42.73358,    6.46609,   43.05339,   20.04885,  -11.90844,  -10.72181,   17.58882,    1.21852,  -11.54529,   -6.57513,    3.74334,  -10.30171,
+-361.15170,   93.01609,   18.34750,   34.17667,   21.55042,  -20.90072,  -19.98984,    8.97430,   -5.22827,   -7.51887,  -19.58678,  -12.16964,  -14.88791,
+-343.59412,  101.69102,   16.34492,   12.79201,   13.97989,  -26.50590,  -36.06129,    2.05754,    0.81474,   -9.14219,  -33.64646,  -17.92052,  -23.68695,
+-335.24365,  113.71022,   19.48219,    0.26569,   10.15661,  -21.69938,  -33.86705,  -11.23172,   -0.19690,   -1.78176,  -14.47228,   -4.76587,  -17.37306,
+-299.25641,   99.93545,   -9.58726,   -9.46401,    9.96207,  -10.53954,  -35.09438,  -15.00333,   10.62438,   -2.10113,   -6.79275,   -6.00083,  -22.96282,
+-300.10519,   78.59589,   -6.51480,  -16.16937,   -0.94357,  -15.15780,  -43.71510,  -21.76927,    1.60866,   -1.99735,   -8.69847,  -12.43583,  -22.55209,
+-316.80902,   93.42959,   -0.82785,  -12.93124,  -14.51613,   -7.83749,  -37.18201,  -28.08782,   -6.06824,    4.80437,   -6.88215,  -16.59713,  -22.29524,
+-315.29916,  115.42698,   -1.13148,  -10.75052,  -13.06001,   -7.73907,  -24.59613,  -41.00932,  -12.86884,    5.27404,   -5.81072,   -9.62144,  -26.91442,
+-314.75229,  124.12968,   -2.79560,  -17.03026,  -28.89275,   -8.95922,   -6.59420,  -40.73174,   -9.84872,    5.15444,   -4.32795,    1.96284,  -24.17693,
+-315.53088,  125.58240,    1.15098,  -21.96985,  -34.33059,   -3.54398,   -6.85529,  -44.57242,  -21.80232,    7.20194,   -8.49010,    4.75513,  -28.38153,
+-316.27704,  122.26854,    9.06494,  -27.00614,  -40.43912,    0.67930,   -6.98057,  -49.63099,  -22.58922,   -2.52189,  -11.50818,    9.91990,  -27.45758,
+-327.35529,  117.16550,   21.98269,  -17.33347,  -38.99503,    6.68010,   -1.65017,  -50.80903,  -26.47403,   -4.14860,  -13.12196,    7.10704,  -30.10763,
+-321.23111,  106.84170,   34.31313,   -3.68023,  -30.01686,    5.29732,   -6.57700,  -42.91172,  -27.53443,    4.61466,  -15.36086,    3.52533,  -30.84628,
+-324.52029,  105.49486,   25.03466,   -2.60415,  -28.69174,   -0.12828,  -14.06143,  -39.43727,  -33.95476,    5.75513,  -19.13151,   -2.93614,  -36.38286,
+-317.18118,  106.27744,   28.27160,    3.54661,   -1.14089,  -12.54203,   -3.93085,  -28.66591,  -23.39810,    4.34814,  -23.34138,  -20.87079,  -33.98920,
+-327.57474,   93.39848,   30.33432,   12.20178,   10.78983,  -17.81099,   -5.48535,  -11.80645,  -23.51105,   -2.96250,  -22.57767,  -28.34293,  -28.73708,
+-350.31647,   85.08986,   36.08148,   28.87788,   15.04847,  -26.97350,  -11.33844,    2.60015,  -23.01023,  -16.96572,  -23.07828,  -27.98988,  -21.04955,
+-376.05798,   72.38734,   42.25750,   45.36614,   23.51564,  -37.08350,   -7.10778,    5.91058,  -30.25048,  -26.47700,  -17.96576,  -24.60368,  -30.84109,
+-404.26224,   65.94995,   51.06128,   50.59633,    6.64929,  -35.64878,    6.40939,    9.16480,  -41.30687,  -34.50303,  -11.83757,  -20.59805,  -30.04825,
+-443.86591,   52.20867,   70.20848,   71.01749,   11.28605,  -30.21029,    5.79169,    0.37297,  -39.64886,  -34.88003,   -4.04763,  -22.88266,  -22.50964,
+-487.70062,   47.62605,   94.98031,   83.99281,   14.70511,  -14.94850,   11.23262,   -5.88986,  -38.03238,  -40.28688,  -16.84157,  -20.98334,  -20.05816,
+-509.07642,   45.77028,  109.92744,   81.87650,   13.95934,   -3.93742,    4.11268,   -9.91554,  -32.80961,  -30.89524,   -9.98727,  -14.64280,  -30.88258,
+-522.71735,   37.61763,  103.68540,   78.68625,   16.01972,    4.56800,   11.92203,  -22.13283,  -29.86579,  -30.24809,  -12.04046,  -10.16577,  -26.49791,
+-508.07632,   30.67447,   94.83904,   71.36203,   24.34888,    7.39304,   20.89282,  -12.73135,  -29.18987,  -23.50452,  -10.04839,  -18.02681,  -21.11977,
+-481.59796,   31.90848,   78.80377,   63.13389,   25.43876,    3.49658,   18.26456,   -5.80285,  -22.66960,  -22.49039,  -17.23462,  -30.48423,  -30.81418,
+-456.96207,   45.49911,   62.68452,   71.53724,   20.47784,  -28.85520,    8.76649,    4.74806,  -28.93201,  -33.62452,  -22.59154,  -30.95151,  -38.00834,
+-438.81244,   64.58253,   53.13954,   72.63129,   29.09319,  -37.28661,    3.45708,   15.12676,  -35.47618,  -49.79375,  -16.69294,  -19.46603,  -34.32941,
+-423.95709,   80.28170,   27.77286,   54.04255,   39.76050,  -35.75346,   -3.38681,   12.29888,  -34.48137,  -55.19286,  -26.09116,  -20.89586,  -38.82964,
+-403.37570,  104.78390,   24.31571,   33.62110,   43.22993,  -32.68417,   -9.63470,   14.40468,  -29.44122,  -32.80064,  -27.87226,  -12.14832,  -21.32968,
+-410.64990,  113.51939,   21.66384,   25.88121,   47.00960,  -13.49685,   -1.19114,    2.34893,  -34.86524,  -13.70483,  -30.08947,  -20.01185,  -20.18216,
+-427.86404,  115.69196,   16.82162,    8.77996,   38.88261,    4.62155,   10.18594,  -17.62808,  -42.41760,  -11.70966,  -29.65791,  -37.22931,  -38.67917,
+-434.29660,  124.90443,   30.81101,   -1.94971,   34.90073,    6.41934,   21.23090,  -18.56267,  -52.61283,  -14.50310,  -15.40610,  -29.62020,  -44.02092,
+-454.99860,  127.80815,   49.49559,   -4.71944,   25.74934,    7.45291,   30.37157,  -19.19380,  -64.06045,  -27.05679,  -21.07340,  -19.17959,  -28.23726,
+-461.24274,  134.59407,   68.29501,  -15.07210,   12.94805,   -5.15533,   36.96617,   -8.80068,  -58.84946,  -30.21778,  -30.54121,  -18.83973,  -18.17796,
+-479.60336,  144.00020,   72.91606,   -9.99038,    1.21133,   -7.48206,   32.60101,    1.88745,  -43.98568,  -33.80843,  -34.62660,  -27.18499,  -11.38533,
+-505.77020,  149.12779,   79.25552,   -6.04475,   -6.48688,   -8.97785,   32.86127,    1.98450,  -35.43898,  -35.60764,  -33.81352,  -25.21284,  -15.70421,
+-513.59387,  154.25208,   83.21911,   -4.70393,   -4.05527,  -10.33386,   28.84863,    3.08010,  -28.15909,  -32.38344,  -26.96810,  -31.97985,  -30.05245,
+-530.85712,  154.80107,   93.09445,    2.43585,  -10.55350,  -12.89960,   16.57527,   11.23611,  -14.67146,  -28.25472,  -30.90336,  -29.62308,  -33.68346,
+-545.02936,  157.65283,   85.82580,    4.90056,    1.98147,   -2.92201,   10.70440,    6.61972,  -24.46090,  -30.23070,  -33.71283,  -23.79030,  -31.37379,
+-549.22186,  155.82300,   71.25284,    2.24713,   14.28724,   -5.89741,   11.94555,    8.34093,  -16.32330,  -24.46994,  -35.22298,  -26.59025,  -29.61477,
+-555.90649,  153.10989,   36.65089,    3.84110,   16.78571,    6.35293,   10.35865,   -2.39851,  -21.93614,  -28.16301,  -27.44939,  -19.97972,  -20.11437,
+-557.76038,  144.97772,   20.05028,   -3.40184,   25.23508,   22.33108,   24.19692,   12.85208,  -13.60905,  -24.50617,  -32.56139,  -24.60412,  -19.16363,
+-565.04395,  141.75775,   23.26726,    1.44345,   24.35340,   23.82019,   24.81945,    8.61719,  -14.13487,  -16.99150,  -26.64034,  -16.34756,  -16.28925,
+-582.35852,  121.70802,   11.77862,    1.21382,   33.49820,   31.02796,   19.44608,   -4.32764,  -22.20745,  -13.69146,  -14.79044,   -1.90694,   -9.58287,
+-611.60425,  108.65694,   15.34908,    4.63596,   34.72903,   35.42127,   20.02589,   -6.36097,  -25.55186,  -17.19771,   -2.10331,   -5.88407,  -30.63498,
+-633.38940,   90.24216,   13.99240,   10.41656,   33.89569,   11.58191,  -12.36133,  -23.82138,  -21.01793,   -6.91555,  -11.57736,  -33.45514,  -32.55983,
+-638.89941,   90.62136,   31.36757,   37.06882,   41.14146,    0.38717,  -15.60635,   -5.89425,   -6.65146,  -10.05258,  -17.95158,  -25.66393,  -20.19134,
+-645.22919,   84.56592,   46.39761,   50.55693,   33.34886,  -18.71098,  -18.46934,   -6.45251,  -23.79868,  -33.81595,  -24.85833,  -19.18478,  -21.62932,
+-663.24762,   70.47253,   47.16746,   42.61106,   24.64014,  -20.18022,  -14.29461,   -8.25118,  -33.31351,  -29.12968,  -15.16687,  -13.82747,  -26.03916,
+-665.23572,   68.91484,   48.07513,   45.93549,   24.35101,  -22.22397,  -14.34225,   -7.24802,  -31.21640,  -26.50978,  -10.99041,  -13.64789,  -25.31266,
+-653.87531,   71.60163,   43.54386,   43.93507,   16.09889,  -32.49047,  -11.13548,   -4.03882,  -32.00384,  -24.84754,   -6.49042,  -19.24341,  -32.97263,
+-628.96997,   80.97196,   43.75434,   42.96486,    5.35637,  -46.30515,  -11.74355,   -5.22698,  -34.61658,  -31.77387,   -6.11563,  -21.50899,  -28.13975,
+-560.27863,   98.65417,   57.04360,   53.12490,   13.60819,  -37.92327,   -2.64569,  -10.87142,  -33.09066,  -27.77287,  -13.27444,  -24.04862,  -38.21613,
+-474.16153,  133.19215,   42.96102,   52.72581,   29.01019,  -33.03656,   -3.30715,   -8.54953,  -37.34371,  -24.96823,   -7.55104,  -21.57481,  -42.22615,
+-420.70679,  127.66073,   44.20655,   51.52517,   21.18233,  -36.80200,    1.18547,   -8.64668,  -31.38762,  -30.50883,   -6.14316,  -16.59294,  -44.98491,
+-392.38339,  118.34142,   35.92733,   60.44886,   14.10930,  -42.87946,   -6.38012,   -8.52512,  -43.05403,  -23.24982,  -14.38810,  -17.42284,  -28.54035,
+-396.14377,   91.24722,   33.84401,   68.58575,   13.68457,  -47.81595,  -10.30057,  -14.74097,  -43.99838,  -23.68055,   -6.89345,  -22.21245,  -22.73807,
+-375.81790,   84.50850,   37.38282,   64.55530,    9.26053,  -38.50275,   -6.90849,   -2.34685,  -39.99230,  -36.20174,  -11.40772,  -19.35306,  -40.25445,
+-371.31277,   86.60709,   26.88540,   60.70909,   13.63760,  -43.42980,  -12.22658,    0.11664,  -36.24310,  -31.65624,   -7.84330,   -6.56514,  -38.04145,
+-395.22690,  113.64909,   34.76160,   69.25298,    8.00502,  -51.12570,  -14.85159,   11.17280,  -27.47415,  -31.16954,  -15.57132,    2.17963,  -37.04792,
+-487.96347,  136.72539,   48.62033,   72.58236,    4.90202,  -34.16551,   -5.12197,   16.65520,    2.30252,  -26.79140,  -12.89626,   -9.70030,  -28.38115,
+-491.70081,  126.93477,   11.16320,   82.84847,   26.42068,    7.49833,   -2.51091,  -18.77224,  -12.22970,  -37.88863,  -36.79497,    3.01517,  -17.09411,
+-362.33643,   91.61451,    0.37455,   63.82765,   10.41564,   -0.94651,   -7.24294,   -5.33058,   -7.09231,  -31.30616,  -27.50639,   15.18285,  -14.38406,
+-360.52228,   96.28909,   25.21511,   62.68855,    4.91354,  -27.48291,  -18.13877,    1.86747,  -10.94692,  -27.01387,  -17.22662,   -2.00933,  -22.37263,
+-378.26196,  118.54053,    5.21113,   56.28373,    9.19872,  -38.01694,  -14.65021,    6.69412,  -28.87903,  -25.22076,  -15.05834,   -5.24249,  -24.69993,
+-370.16278,  114.58689,   -0.28570,   44.82927,   13.10141,  -42.67530,  -17.02056,   -0.04590,  -34.04948,  -21.32712,  -20.10828,   -6.89947,  -22.48342,
+-337.19305,  118.63881,   -3.91941,   22.77721,   19.59201,  -45.92442,  -31.26322,    2.50052,  -38.53117,  -19.03586,  -24.08696,   -8.85186,  -17.54709,
+-310.57822,  113.03319,   -9.06078,   25.27596,   11.55997,  -40.19213,  -39.65878,   -1.88564,  -33.22710,  -26.07192,  -22.41040,   -9.94286,  -18.82849,
+-311.33539,  103.68066,   -1.11024,   25.31038,    1.24793,  -40.32587,  -44.05034,  -10.81609,  -25.39709,  -27.82468,  -24.11249,  -15.82641,  -10.58237,
+-306.09283,  107.53571,   -5.71102,   15.36855,   -0.06519,  -32.78188,  -39.53238,  -17.80904,  -23.24977,  -21.61374,  -22.77439,  -18.94307,   -4.31926,
+-315.72760,  116.47948,  -11.70051,   10.31159,    6.36807,  -23.05539,  -34.21939,  -21.04945,  -18.21829,  -12.44790,  -29.04151,  -16.46194,   -3.99485,
+-335.40533,  122.68563,   -2.73751,   14.60683,    7.67535,  -21.95919,  -25.67969,  -26.21349,  -14.40325,   -9.32450,  -27.60385,  -19.34495,   -2.29705,
+-344.91006,   84.22801,   30.53672,   24.76007,   -7.96196,  -12.27603,  -28.83938,  -31.28870,   -7.10280,  -10.32913,  -26.31890,  -28.76020,  -10.16101,
+-321.74847,   22.61870,   67.62318,   22.66578,  -16.89212,    1.31448,  -41.29454,  -20.61598,   -4.62421,  -11.44328,  -19.23549,  -23.71359,  -19.15634,
+-320.57489,  -10.41486,   99.94828,   33.88570,   -1.77472,   28.93340,  -27.08552,  -21.86935,    2.15045,   -5.06805,  -18.29232,  -16.90594,  -15.15376,
+-325.89075,  -29.47031,  110.55493,   38.55762,    4.04195,   34.33043,  -26.90088,  -28.04365,   -4.17569,   -8.46432,  -19.69166,  -15.66487,   -7.47089,
+-336.47360,  -56.66702,   86.97776,   19.25002,   -2.79542,   22.82478,  -15.66427,   -0.37997,   10.49368,  -13.39484,  -25.38358,  -28.94642,  -16.30313,
+-323.85843,  -59.53122,   89.57014,   27.79789,   -0.33431,   17.84040,  -15.93314,    3.56822,   14.33489,  -12.46129,  -23.75436,  -14.04394,   -7.38174,
+-324.96545,  -75.17782,   83.45936,   19.04148,    1.78696,   22.14714,  -13.65047,    2.92450,   19.27539,   -2.63968,  -20.18465,  -11.90597,   -7.07332,
+-344.67255,  -79.84637,   80.44215,   23.84577,    0.79056,   25.22506,  -11.30942,   -1.17479,   24.93149,   -9.26643,  -17.22021,  -11.04599,  -16.54391,
+-375.36279,  -70.17471,   71.28268,   31.66566,   -7.46310,   13.73584,   -8.19228,   -9.15990,   12.89818,  -15.77331,  -23.41481,  -21.19809,  -21.89870,
+-412.11044,  -54.72255,   76.23882,   54.27297,    6.91382,   30.43379,   -0.04433,   -9.83065,    9.14938,   -3.71777,   -9.83891,  -10.77449,  -26.20202,
+-426.38629,    8.12202,   50.87331,   45.91777,   54.90336,   48.59229,  -14.05536,  -15.14324,   -3.37800,  -22.35563,  -19.86466,  -21.13762,  -22.59322,
+-483.17102,   95.93178,   29.22441,   35.87289,   58.79867,   21.23550,  -26.27444,  -23.48962,   -8.57341,  -29.70831,  -11.57309,  -18.66440,  -26.72654,
+-539.40594,  139.17160,   40.84406,   23.59950,   31.91476,    2.91482,  -27.79764,  -16.67805,  -36.13083,  -23.88976,   -6.00165,  -11.89510,  -23.69569,
+-550.31390,  146.30614,   46.67932,   37.03757,   44.81111,  -11.20804,  -17.97526,  -12.71701,  -36.34298,  -16.29894,   -2.04527,  -12.37814,  -25.78544,
+-550.08276,  142.76784,   43.96206,   30.36060,   46.46640,  -14.24652,  -13.37420,  -10.16631,  -33.66499,  -15.89499,   -3.76226,  -12.20420,  -20.20636,
+-547.64502,  144.29440,   43.40225,   36.58519,   45.52866,  -15.76632,  -11.51921,   -6.43776,  -38.47662,  -22.12944,   -9.12653,  -17.88100,  -19.03604,
+-530.69507,  150.77492,   39.30650,   34.92780,   48.19412,  -12.40477,  -13.61568,   -5.13870,  -44.81704,  -15.71391,   -6.85942,  -16.08999,  -23.19726,
+-531.73352,  148.31209,   40.27899,   35.28296,   51.28910,   -6.96135,  -11.41517,   -3.96197,  -44.39988,  -16.34683,  -13.13658,  -22.45674,  -23.91177,
+-528.42450,  148.31938,   43.70385,   33.69122,   47.01711,  -12.27729,  -16.49473,   -4.52656,  -41.69998,   -9.85081,  -11.73811,  -20.82237,  -22.63989,
+-524.53143,  152.69884,   41.16589,   21.22641,   41.17306,  -20.25027,  -21.31323,  -11.35616,  -40.42639,  -20.23978,  -14.69206,  -24.46453,  -22.25762,
+-533.03210,  141.29987,   46.16906,   21.69607,   45.43491,  -20.68425,  -27.40121,  -15.02421,  -35.13165,  -17.41097,   -7.54792,  -13.94088,  -19.49087,
+-504.09952,  108.60211,   45.97687,   37.09571,   17.71260,    5.86324,  -19.76870,  -17.16998,  -17.81991,  -28.20092,  -15.53811,   -4.63092,  -19.00638,
+-467.63785,  132.16171,   32.52523,   24.28167,    7.25442,   -0.63801,  -21.26180,  -24.28684,  -17.23636,  -23.55528,  -10.52019,   -9.29675,  -22.95332,
+-340.47134,  150.94380,   -5.40431,  -12.30841,  -14.63590,    3.29749,  -28.80212,  -23.54673,  -14.12508,  -13.87720,  -13.41590,  -17.71863,  -26.09510,
+-311.39139,  142.51981,  -31.06075,  -35.87051,  -10.02770,  -12.16096,  -18.78892,  -32.56214,  -13.80549,   -4.23693,   -2.31403,  -11.90334,  -32.74061,
+-309.15152,  124.61391,  -50.81188,  -38.51775,  -11.46098,  -20.72153,   -1.91621,  -40.46830,  -10.70336,    8.67581,   -1.28233,   -2.77227,  -38.88427,
+-310.65503,  124.23444,  -52.91621,  -40.31299,  -19.68514,  -23.62526,   11.31189,  -37.97126,  -13.66395,    6.95346,  -12.45460,   -3.08454,  -33.13107,
+-298.31958,  140.30038,  -60.75840,  -43.61348,  -29.72572,  -25.76282,    7.95005,  -35.32393,  -20.30392,    7.45490,  -14.43555,    5.52338,  -33.68083,
+-310.68411,  143.88361,  -43.75583,  -32.23655,  -43.31153,  -16.77195,    5.56395,  -30.31214,   -8.32603,   14.51808,   -9.60608,   11.15491,  -39.21754,
+-306.63290,  132.73161,  -40.05501,  -36.98214,  -53.05022,  -14.40135,    1.52464,  -39.32082,   -6.91366,   13.03398,   -8.60260,   11.39476,  -38.40683,
+-293.21274,  114.66545,  -52.86196,  -43.43966,  -49.01334,   -8.87678,    4.17357,  -36.09983,   -4.14086,   10.07351,   -3.47874,   12.86056,  -42.30226,
+-260.88785,  118.68002,  -46.41621,  -40.20872,  -33.20874,    1.55962,   20.05049,  -30.44283,   -3.89125,    6.33120,   -0.51390,   13.73246,  -42.64820,
+-248.72377,  125.34203,  -53.03514,  -45.62149,  -33.05993,   -4.37795,   12.62489,  -26.28844,   -4.05405,    0.75630,    2.51615,    6.27459,  -47.92072,
+-249.81819,  130.63123,  -62.14239,  -48.99425,  -34.38806,  -14.66482,    1.07221,  -27.83782,   -4.74374,   -0.83245,   -0.56152,   -0.98220,  -48.11487,
+-254.00978,  139.72881,  -59.63356,  -36.40792,  -26.36911,  -17.42650,  -19.30721,  -22.03000,   -0.97694,    7.48954,    3.50996,   -3.00547,  -50.41830,
+-259.76166,  154.54689,  -53.19246,  -16.99224,  -17.35365,  -11.15831,  -24.09927,  -17.77573,   -1.32666,    1.86994,   -4.33618,  -14.71345,  -36.14102,
+-309.31085,  176.01402,  -48.16504,    1.92940,   -9.54937,   -7.85120,  -30.84069,  -27.12883,   -6.02949,   -7.01642,   14.15697,  -17.98174,  -17.55932,
+-440.07883,  184.48796,  -10.16064,   18.17631,   16.42781,    0.29587,  -16.35007,  -20.31314,   -5.56250,   -0.70789,   13.50835,  -11.07479,  -10.24114,
+-512.24915,  192.43919,   20.11260,    9.00326,   24.41474,   -9.59827,  -25.85304,  -11.96010,   -2.19711,    6.27681,    9.08973,  -10.23279,  -17.25590,
+-545.09546,  175.38416,   27.32856,    9.03898,   28.14956,   -3.71783,  -21.98946,   -5.63140,   -3.17746,   -8.66310,    0.45472,   -1.61766,  -15.34482,
+-616.07855,  134.16933,   52.32460,   13.84887,   17.44582,   14.40052,   -4.27584,  -11.32673,    0.50363,   10.12419,    2.44672,  -13.78299,  -21.20637,
+-659.68634,   99.49026,   60.61839,   28.53442,   10.91960,   -0.92757,   -9.31487,   -8.36906,   -0.55846,    2.12741,   -4.53421,  -10.89314,   -8.77946,
+-659.05249,   96.39398,   51.13544,   18.32431,    7.06392,    6.12628,    6.43870,    3.02480,   -6.60868,  -16.90468,  -19.56609,  -15.09356,  -10.13031,
+-663.96637,   88.90488,   44.31673,   15.60063,    9.49454,   12.31807,   15.58250,   12.86089,   -1.57275,  -18.46472,  -21.38613,  -11.58452,   -3.48439,
+-705.61469,   48.35851,   37.74590,   26.64371,   18.58700,   13.84768,   10.08821,    4.86532,   -1.35519,   -6.30758,   -9.04604,  -10.25949,  -11.49900,
+-712.27972,   37.26599,   25.93010,   20.55217,   21.43903,   20.69118,   14.25117,    5.64500,   -0.06065,   -2.23915,   -3.81368,   -6.35554,   -9.13893,
+-722.03278,   26.32119,   20.04255,   16.95817,   17.41955,   16.11649,    9.82035,    1.73084,   -3.03804,   -3.87100,   -4.45093,   -7.37119,  -11.27701,
+-726.48999,   22.07387,   19.45728,   17.36243,   15.53367,   12.31727,    7.63438,    3.50081,    1.23575,   -0.28905,   -2.63978,   -5.22975,   -6.21708,
+-731.08289,   15.97149,   14.62836,   13.32377,   12.42178,   10.31140,    7.56272,    4.76615,    3.10965,    1.21597,   -1.16498,   -3.99053,   -5.54179,
+-639.79535,   51.71355,   -2.33925,   35.18297,   36.18015,   -6.28807,  -26.16285,   -0.99254,    5.17867,   -0.96490,   10.68179,    1.95577,  -11.31189,
+-342.22736,   74.86382,  -12.04188,   24.78247,   23.38870,   -5.64898,  -20.37819,   15.64958,   14.62847,  -11.82782,   -6.57900,   -3.53603,  -18.39786,
+-316.12149,   82.72071,  -19.31676,    7.00616,   11.48070,   -2.32272,   -9.79769,    9.02850,    8.73732,  -12.53925,  -13.31921,    3.73772,  -14.35109,
+-373.67056,   90.69460,  -30.11087,  -15.01380,  -11.97342,   -7.14060,   11.48161,    9.99608,    5.34673,    2.11251,   -9.52344,   11.62454,  -14.17081,
+-402.84872,   92.72632,   -4.53706,  -17.97655,  -24.18877,  -15.23140,   20.55283,    4.26547,   -1.90602,   16.05496,   -8.39785,   17.63147,   -7.58050,
+-408.00958,  104.34008,   22.46475,   -2.44329,  -14.62350,  -17.33055,   15.03711,  -14.86941,  -20.18334,   24.46267,  -28.20920,    3.93071,  -20.25869,
+-405.76697,  147.20627,   56.66483,    4.83606,  -36.89982,  -27.85701,    6.59308,  -20.22911,  -28.79591,   17.14229,  -37.56203,    8.35426,  -20.11530,
+-405.19531,  186.35690,   56.11123,    0.79771,  -63.96230,  -45.93291,    7.61355,   -7.51543,  -15.41443,   26.49616,  -35.26764,  -12.33839,  -28.09844,
+-415.56647,  186.05031,   44.84282,    8.57117,  -75.28368,  -39.46779,    8.06600,   -5.73941,  -16.75528,   23.65435,  -33.09233,   -8.24646,  -20.35406,
+-410.48975,  169.63432,   47.38409,   11.42275,  -82.42666,  -35.03646,    4.36853,  -12.49395,  -26.03498,   14.63939,  -41.48138,    3.48123,  -21.27683,
+-389.01846,  181.50140,   31.39800,   -0.73777,  -86.05359,  -40.73767,    1.15004,  -10.21134,  -17.95034,   12.57470,  -51.81875,    5.42645,  -29.16546,
+-381.36578,  190.97713,   16.56431,  -18.99318,  -74.91054,  -40.46955,    9.64043,    1.46569,   -9.14612,    3.86484,  -37.25309,    0.22394,  -36.43250,
+-372.44888,  197.29846,    6.56907,  -36.48407,  -43.23312,  -28.98909,   10.33254,    6.20580,  -12.04798,  -30.40288,  -25.32218,    7.00081,  -30.52537,
+-349.32806,  191.64467,  -14.09583,  -48.44706,  -17.40703,  -16.26879,   12.72570,  -10.09641,  -22.76518,  -50.48253,   -6.45452,    9.54748,  -21.54997,
+-329.09549,  185.53635,  -23.96144,  -48.33319,   12.20624,   -9.46398,   -1.35701,  -26.31596,  -37.73962,  -40.88706,   14.07668,   12.47700,  -22.13748,
+-332.91913,  172.24333,  -31.32884,  -38.86584,   27.30210,  -13.64118,  -27.55054,  -36.44904,  -34.19913,  -35.77820,   10.24152,    2.61219,  -29.05568,
+-337.38162,  159.34306,  -21.24244,  -19.63639,   37.31166,  -10.48006,  -53.06920,  -30.64360,  -13.26747,  -36.61865,   -3.26523,   -5.97771,  -30.19121,
+-345.73712,  148.88217,  -10.56683,    3.55900,   36.27192,  -20.22350,  -64.35509,  -16.89741,   -1.25741,  -43.80673,  -11.09250,   -4.66476,  -29.39306,
+-347.59207,  124.05372,    0.06424,   26.65538,   27.07813,  -24.55914,  -52.08172,   -9.33817,    0.87643,  -52.62959,  -13.25934,   -4.64068,  -38.93477,
+-354.77094,  107.03270,   18.57542,   39.82758,   21.07135,  -20.15565,  -32.71638,    3.82454,    3.26863,  -49.43832,  -18.47179,   -1.33855,  -46.21080,
+-356.82126,  101.29904,   31.65295,   48.51870,   25.42647,  -24.41973,  -17.62938,   -3.69525,   -6.80475,  -37.40614,  -16.25546,    3.79310,  -46.79686,
+-365.47476,   96.48830,   37.55275,   52.13327,   27.34974,  -23.96722,  -19.71255,   -2.85377,  -15.44656,  -31.10847,  -11.01326,    2.28878,  -38.16014,
+-359.25830,  100.04993,   35.95026,   43.66024,   34.67427,  -23.88166,  -25.05778,    6.28581,  -13.95853,  -29.20864,  -14.47804,   -4.28369,  -38.17802,
+-355.16251,   85.51714,   36.95945,   47.05676,   35.13097,   -0.48355,  -26.01568,    8.33860,   -0.76428,  -32.37434,  -24.03978,  -12.23794,  -24.79102,
+-345.82358,   53.52118,   50.42019,   43.07238,   20.82985,    2.91177,  -32.40328,    1.76590,   -0.75600,  -17.39132,  -24.83293,  -24.84604,  -18.59051,
+-324.37042,    6.48494,   81.98193,   59.58132,   13.82420,   18.43753,  -21.45969,    7.82956,    4.68797,  -16.76673,  -28.63046,  -22.80368,  -24.61898,
+-338.64651,  -20.17721,  102.06671,   56.37870,   10.54378,   17.93890,  -12.79192,    3.72052,   19.59299,  -18.45952,   -5.52712,  -10.50150,  -14.15830,
+-337.57449,  -38.65701,   94.49697,   55.52618,   16.52288,   26.58708,   -6.60422,    5.20680,   32.34816,  -10.49484,  -13.71105,  -17.84119,  -20.07737,
+-337.04196,  -30.74304,   83.06897,   40.65010,   20.80648,   36.09961,   -4.79771,    2.75117,   20.44530,    4.15405,   -9.48995,  -14.69443,  -17.01763,
+-336.17767,  -17.02739,   78.98351,   36.36460,   16.83785,   35.33120,   -8.01113,   -1.16322,   14.32056,   -6.75638,  -18.05978,  -11.20271,  -19.45627,
+-328.07965,    9.37859,   76.33435,   36.29006,   12.61380,   35.82531,  -24.38402,  -17.90148,   19.43253,   -9.25912,  -23.28895,  -17.65702,  -21.09180,
+-343.16101,   44.98497,   70.59290,   36.67994,   10.65938,   33.58757,  -31.46309,  -21.07189,    4.88002,  -12.56131,  -15.16683,  -23.74206,  -22.84709,
+-363.25851,   89.59911,   49.30536,   34.12471,    2.75580,   27.73447,  -31.61609,  -24.36949,    3.89285,  -13.43786,  -23.47494,  -20.88989,  -31.23467,
+-379.68423,  119.24084,   18.05355,   26.64109,   15.60474,   30.37321,  -26.09801,  -25.50461,    9.00627,  -30.22595,  -19.81620,  -21.95905,  -38.36639,
+-384.54666,  126.66396,   17.38682,   14.13732,   27.81340,   14.06393,  -21.93423,  -27.65716,    9.35011,  -42.40445,  -17.44700,   -9.82522,  -30.37819,
+-411.82861,  155.06801,    7.81988,   25.71399,   10.58142,   13.98584,  -20.39351,  -35.70902,   12.12937,  -33.90580,  -11.49447,  -10.75090,  -42.80706,
+-471.00290,  176.11607,   19.88964,   31.80215,   18.70531,   17.40353,  -24.72796,  -23.13475,    6.21466,  -30.76181,   -7.90854,  -19.20424,  -46.07262,
+-542.09521,  178.40500,   50.89020,   33.66317,   49.60354,   18.17146,  -17.65844,  -21.48166,  -25.04848,  -32.41996,  -11.10730,  -10.62945,  -27.44338,
+-550.66718,  182.16162,   54.56665,   27.04737,   54.47807,   31.37173,   -6.15687,  -21.02319,  -22.85120,  -24.10654,  -19.48920,  -21.63028,  -22.44670,
+-552.81726,  182.10162,   61.26754,   35.03073,   55.85181,   30.13055,   -5.99490,  -20.07623,  -22.07219,  -15.74288,  -13.91844,  -20.16744,  -18.97942,
+-544.18488,  182.36761,   52.82781,   36.16469,   61.39413,   25.15055,   -9.71565,  -14.11956,  -18.60657,  -17.49442,  -10.51644,  -15.87521,  -21.88856,
+-545.78558,  179.03738,   48.88223,   33.75148,   62.84162,   29.09242,   -6.05286,  -11.99398,  -17.85251,  -15.68026,   -6.50234,  -12.05964,  -23.38323,
+-556.52777,  174.80112,   53.80671,   37.43137,   57.72126,   27.46006,   -3.14888,   -9.04027,  -15.06365,  -11.37029,   -2.66824,  -11.51523,  -24.39712,
+-549.42841,  171.00031,   42.87871,   42.80640,   66.08316,   25.29501,    0.87917,   -1.86104,  -17.41379,  -17.45526,    0.62682,   -9.37598,  -19.51379,
+-550.77026,  167.68494,   41.31839,   47.43911,   71.51474,   23.08547,   -3.54858,   -2.01599,  -12.36597,  -15.21634,    0.98496,   -9.53740,  -16.19566,
+-562.23907,  162.02208,   49.44933,   45.88921,   66.58150,   24.05586,   -8.63106,   -3.62967,   -6.04240,   -3.92589,   -1.60562,   -7.13539,  -11.40836,
+-567.48071,  162.70137,   47.84641,   42.31034,   64.06474,   30.14901,   -2.78371,   -6.70905,  -10.95013,   -6.79315,    1.08683,   -7.31497,  -17.66081,
+-592.04895,  154.20142,   67.06769,   50.96314,   57.81846,   34.39915,    9.02027,   -1.97011,   -9.80261,  -11.53745,  -13.48275,  -23.37712,  -21.63716,
+-613.96185,  142.23486,   78.04945,   54.53412,   50.37206,   35.27765,   16.71364,   -0.16995,  -10.51157,   -7.81608,   -7.27338,  -18.19815,  -22.58065,
+-623.37231,  130.30595,   76.90309,   64.94444,   58.26167,   31.72890,   12.44767,    0.89875,   -7.31113,   -8.87330,  -11.26855,  -16.62859,  -12.09391,
+-526.01825,  145.78355,    8.95888,   52.16813,   28.99513,   -5.08013,   13.08779,   14.38430,   -0.01122,  -10.31202,   -7.53272,   -7.29460,   -3.43278,
+-539.64673,  138.42181,   12.10800,   48.64116,   26.21812,  -13.02563,   10.41339,   16.91183,    3.32470,   -6.08288,   -2.11641,   -6.44468,   -5.93290,
+-589.82617,  122.51987,   21.44257,   41.49991,   42.71370,    3.13264,   10.15284,   24.20997,   14.37974,    9.17556,   -4.46364,  -21.85761,  -16.65999,
+-615.26050,  104.61245,   14.61877,   24.35236,   35.20853,    5.54551,    6.43138,   22.46386,   18.52065,    9.82759,   -5.80420,  -27.75697,  -25.72497,
+-655.27783,   74.97298,   18.20997,   29.73637,   36.75604,    5.38053,   -3.63343,   13.44123,   18.08150,    6.55374,  -10.95804,  -22.64592,  -16.10758,
+-683.23322,   59.71490,   28.18122,   30.36807,   34.32085,   18.50830,    7.70230,   11.60670,   14.50619,    7.28390,   -7.20995,  -17.80128,  -17.05165,
+-709.21967,   34.49238,   17.61381,   18.90655,   19.67831,    7.15575,   -1.46977,    1.13926,    2.01570,   -3.91360,  -10.35604,  -12.89073,  -11.23378,
+-722.60254,   27.00973,   23.59079,   21.18510,   18.54302,   13.69143,    8.24810,    4.72475,    2.12363,   -1.78975,   -6.01705,   -8.09017,   -8.43185,
+-728.82135,   19.60254,   18.29690,   16.35365,   14.01246,   11.45263,    8.74696,    5.88674,    2.85985,   -0.26681,   -3.31179,   -6.01459,   -8.12518,
+-734.40045,   11.88858,   11.12416,   10.03389,    8.64481,    6.90780,    4.89620,    2.83119,    0.88499,   -0.93537,   -2.64846,   -4.13302,   -5.20602,
+-731.09973,   16.23312,   15.63525,   14.62359,   13.13073,   11.01530,    9.38444,    7.01467,    4.70279,    2.58447,    0.73818,   -1.51889,   -2.83358,
+-735.99713,    9.36690,    9.22009,    8.00824,    7.25840,    5.54003,    4.38853,    2.40805,    1.12161,   -0.83802,   -1.98246,   -3.65658,   -4.42722,
+-738.08246,    6.74858,    6.11811,    5.14963,    3.94640,    2.62023,    1.27304,   -0.01320,   -1.17665,   -2.16874,   -2.94603,   -3.46665,   -3.69432,
+-734.35925,   12.00047,   11.33683,   10.30780,    8.95957,    7.32947,    5.49645,    3.58400,    1.70265,   -0.08561,   -1.73316,   -3.16493,   -4.30174,
+-734.30115,   11.98556,   11.23580,   10.41712,    9.47419,    8.09429,    6.28417,    4.45523,    2.87658,    1.35736,   -0.34371,   -2.03668,   -3.31543,
+-738.17822,    6.62347,    6.20253,    5.86356,    5.53534,    4.91368,    3.97001,    3.02845,    2.28791,    1.54555,    0.55870,   -0.53031,   -1.38146,
+-739.94824,    4.22052,    3.90312,    3.39999,    2.74741,    1.99142,    1.18364,    0.37680,   -0.37978,   -1.04404,   -1.58425,   -1.98085,   -2.22703,
+-739.30322,    5.06824,    4.56438,    3.77272,    2.76009,    1.60996,    0.41392,   -0.73753,   -1.76363,   -2.60055,   -3.20636,   -3.56323,   -3.67687,
+-739.16473,    5.30410,    4.91553,    4.30079,    3.50576,    2.58837,    1.61301,    0.64460,   -0.25719,   -1.04301,   -1.67751,   -2.14146,   -2.43210,
+-736.69739,    8.79853,    8.42094,    7.81059,    6.99461,    6.00839,    4.89334,    3.69428,    2.45682,    1.22480,    0.03817,   -1.06882,   -2.06871,
+-734.32904,   12.03986,   11.37942,   10.41253,    9.27072,    8.04052,    6.73659,    5.32101,    3.75596,    2.05790,    0.32033,   -1.30946,   -2.68754,
+-736.46539,    9.08386,    8.58840,    7.80864,    6.79541,    5.60690,    4.31370,    2.99494,    1.72147,    0.54269,   -0.51056,   -1.41694,   -2.16573,
+-738.00793,    6.90498,    6.52955,    6.14000,    5.71911,    5.11078,    4.31552,    3.53233,    2.87486,    2.21075,    1.38225,    0.46470,   -0.34030,
+-738.29486,    6.57672,    6.30999,    5.88031,    5.30815,    4.61892,    3.84067,    3.00211,    2.13129,    1.25494,    0.39838,   -0.41441,   -1.16096,
+-735.73694,   10.02796,    9.55337,    8.62896,    7.59865,    6.26828,    4.98420,    3.55015,    2.27467,    0.94200,   -0.18489,   -1.32611,   -2.24739,
+-738.12238,    6.82689,    6.57912,    6.18188,    5.65690,    5.03126,    4.33449,    3.59570,    2.84085,    2.09071,    1.35968,    0.65559,   -0.01955,
+-741.35187,    2.32154,    2.25504,    2.14647,    1.99904,    1.81698,    1.60526,    1.36934,    1.11486,    0.84744,    0.57248,    0.29505,    0.01979,
+-740.19116,    3.91596,    3.71056,    3.37813,    2.93304,    2.39441,    1.78518,    1.13102,    0.45907,   -0.20331,   -0.82985,   -1.39657,   -1.88286,
+-740.28094,    3.82164,    3.71215,    3.53285,    3.28839,    2.98509,    2.63071,    2.23421,    1.80547,    1.35500,    0.89357,    0.43194,   -0.01951,
+-738.56226,    6.22831,    6.04752,    5.75127,    5.34703,    4.84492,    4.25744,    3.59908,    2.88593,    2.13516,    1.36458,    0.59208,   -0.16482,
+-737.59711,    7.48435,    7.10050,    6.68635,    6.21458,    5.52307,    4.61379,    3.69611,    2.90172,    2.11949,    1.20903,    0.25253,   -0.55514,
+-738.57526,    5.76306,    5.26624,    4.74333,    4.66980,    3.88802,    2.90043,    1.82335,    1.50358,    1.10149,    0.47930,   -0.44566,   -0.81899,
+-724.99268,   -0.10466,  -15.73566,   14.00270,   17.04775,  -10.95862,   -4.33519,   15.78423,    2.44723,   -9.77599,    3.77438,    7.40308,   -4.98057,
+-590.63898,  -39.88918,  -61.68854,   29.76473,   38.49112,  -12.07083,  -15.84209,   22.42212,   -4.31226,    9.27367,   -4.15463,   10.12855,  -12.81028,
+-520.53040,  -62.00702,  -56.77345,   34.64658,   41.82232,  -11.03334,  -17.87015,   16.52698,   -0.77704,    5.50097,  -10.93985,   13.53583,  -21.96571,
+-499.40854,  -82.09811,  -42.77442,   46.67083,   30.14816,  -10.77432,   -8.14084,   19.69935,   -3.97295,    7.10202,   -9.52753,   17.72063,  -19.13954,
+-478.88480, -102.54325,  -33.41099,   51.24611,   22.43158,  -15.02392,  -12.73046,   18.12399,   -5.56858,   20.43665,   -5.20860,    1.63608,  -22.93487,
+-460.00089, -117.45983,  -38.14782,   44.41587,   10.85175,  -18.42480,  -20.85704,   13.42610,   -5.06648,   14.77578,   -5.41554,    5.98919,  -12.92177,
+-436.08902, -106.75754,  -37.54352,   47.05707,    6.49411,  -21.96540,  -20.56840,   11.17224,   -4.91976,    8.21666,   -9.43517,    0.02851,  -24.14723,
+-431.53085, -104.52063,  -31.69811,   55.99023,   14.07409,   -9.67273,   -6.03402,   21.91988,  -14.17605,    6.90253,  -10.94309,    6.77816,  -18.33216,
+-424.59906,  -86.83882,  -22.08900,   53.29281,   19.05461,   -7.95386,   -2.32602,   19.13455,  -19.69125,    8.10068,   -6.92678,   10.93707,  -16.15567,
+-428.36624,  -67.50560,  -24.48224,   50.47601,   18.74273,  -20.39020,  -20.49356,   16.66978,  -11.89109,    1.08991,   -8.05492,    5.10515,   -8.56974,
+-443.08276,   -8.88845,  -10.48011,   66.38592,   49.91155,    0.86849,   -0.34684,   22.84564,   -2.74701,    0.58778,  -17.34853,    5.22805,  -11.46131,
+-435.82764,   95.80535,   13.62339,   80.46363,   55.52112,   -7.25954,   -1.67090,   10.80670,  -10.50134,   -8.81252,  -25.66192,   -3.25990,  -20.18513,
+-381.94879,  131.20715,  -16.48029,   87.01087,   53.16800,  -45.39627,   11.92146,    9.28672,  -14.34239,  -15.28063,  -32.94108,    3.39632,  -20.16990,
+-374.69403,  118.91750,  -17.20040,   82.15328,   56.67689,  -41.36109,   23.95814,   13.94576,  -15.61728,  -15.42536,  -38.30678,   13.34375,  -29.81994,
+-369.38788,  124.81142,  -24.51602,   89.74030,   48.55617,  -41.11355,   18.53158,    8.42735,  -15.37730,  -14.26523,  -28.02233,    9.68363,  -36.25220,
+-386.45721,  118.97467,  -22.82434,   96.50023,   47.94357,  -37.79622,   16.12708,    1.16924,  -14.44053,  -24.05315,  -24.62144,   17.51671,  -29.46925,
+-421.42725,  115.36848,  -14.01150,   98.95058,   45.11883,  -34.03223,   18.23173,    2.81380,   -6.00298,  -30.70661,  -15.66134,    8.52229,  -39.76482,
+-468.87555,  111.35516,   16.96186,  108.35189,   44.64090,  -28.17288,    8.54648,    1.14605,   -1.83844,  -36.53164,   -6.91851,   17.54620,  -42.04927,
+-544.56995,  101.10758,   43.59412,   97.50617,   41.32880,  -20.49871,    6.59254,   11.98400,   13.62090,  -35.46710,   -5.78840,   22.58328,  -32.60872,
+-616.66064,  102.26636,   52.12772,   74.47034,   45.83502,    0.88920,   13.82385,   27.56186,    4.32556,  -31.13667,   -0.50148,    8.55910,  -24.87976,
+-655.08875,   92.81599,   66.07819,   64.82114,   50.34473,   17.12009,    7.09327,    7.61381,    1.25385,  -12.30105,   -2.86482,   -1.24810,  -11.74414,
+-667.78241,   79.23113,   64.03854,   57.02592,   57.56951,   27.23013,    9.59921,    5.18198,    7.68584,   -4.19990,   -8.00458,   -7.87641,   -6.57486,
+-642.40674,   94.75227,   25.83233,    5.45235,   27.99678,   28.74962,   15.73191,    4.30192,    4.12479,    2.92348,    2.88622,   -2.28952,   -9.06624,
+-451.78699,   99.16212,  -27.62571,  -31.75214,   31.80745,  -16.08509,   11.18785,  -14.27908,  -21.10685,    7.97357,   -0.75501,   -2.21972,   -6.87012,
+-401.80478,  109.05980,  -40.16762,  -28.47931,   26.69852,  -14.71886,   13.88773,  -17.41035,  -25.91043,    4.41777,   -1.47560,   -4.54047,   -9.88411,
+-368.46060,  117.97938,  -37.87859,  -40.05217,   16.11098,  -17.78066,   33.12175,   -9.39918,  -22.65516,   -2.05613,    5.48190,    3.70603,  -14.99805
+};
+
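+/* Golden reference values for the first-order delta ("diff1") features.
+   Assumed purpose, inferred from the array name and surrounding test data:
+   expected output used to validate the feature pre-processing in unit tests. */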
+static const float golden_diff1_features[] = {
+-123.11830,    0.99917,    0.95805,    0.89408,    0.81248,    0.71794,    0.61364,    0.50104,    0.38071,    0.25359,    0.12199,   -0.01007,   -0.13747,
+-110.86054,    0.82501,    0.79184,    0.73540,    0.66041,    0.57449,    0.48119,    0.37892,    0.26721,    0.15073,    0.03548,   -0.07627,   -0.18402,
+ -86.10514,    0.80844,    0.77220,    0.70644,    0.61741,    0.51769,    0.41308,    0.30020,    0.17814,    0.05537,   -0.05868,   -0.16374,   -0.26398,
+ -49.11975,    0.55242,    0.48541,    0.43367,    0.39057,    0.31347,    0.20146,    0.10639,    0.05811,    0.02169,   -0.04557,   -0.12705,   -0.17475,
+   0.19521,    0.21516,    0.12044,    0.11689,    0.17158,    0.15369,    0.05109,   -0.00531,    0.05060,    0.11282,    0.06911,   -0.02435,   -0.03706,
+  -0.03262,   -0.08807,   -0.14386,   -0.11539,   -0.03314,   -0.00671,   -0.04884,   -0.05501,    0.02827,    0.11541,    0.11466,    0.06971,    0.08476,
+  -0.22049,   -0.33602,   -0.36584,   -0.34196,   -0.28473,   -0.26232,   -0.28089,   -0.27253,   -0.20086,   -0.11662,   -0.07589,   -0.05276,    0.01411,
+  -0.25332,   -0.37332,   -0.39287,   -0.38077,   -0.34722,   -0.32975,   -0.32956,   -0.30655,   -0.24162,   -0.16621,   -0.11195,   -0.06195,    0.01795,
+  -0.10887,   -0.15171,   -0.14795,   -0.14679,   -0.14613,   -0.13917,   -0.12300,   -0.10187,   -0.08020,   -0.05580,   -0.02353,    0.01600,    0.05628,
+   0.14574,    0.22539,    0.25125,    0.23819,    0.19844,    0.18125,    0.19373,    0.18966,    0.14405,    0.09475,    0.08440,    0.09461,    0.07662,
+   0.15581,    0.25311,    0.29410,    0.25368,    0.16032,    0.10641,    0.10988,    0.08252,   -0.01615,   -0.11151,   -0.11978,   -0.07825,   -0.07926,
+   0.30504,    0.46966,    0.49986,    0.39949,    0.21782,    0.10238,    0.08256,    0.03093,   -0.11686,   -0.25540,   -0.27020,   -0.22181,   -0.24756,
+   0.69681,    0.83032,    0.56994,    0.48308,    0.48805,    0.30629,   -0.04563,   -0.26607,   -0.23658,   -0.18923,   -0.31124,   -0.45747,   -0.41045,
+   1.23411,    1.01172,   -0.00456,    0.21409,    0.97355,    0.66618,   -0.46384,   -0.82240,   -0.14151,    0.23893,   -0.31966,   -0.84641,   -0.49297,
+   1.76171,    0.80587,   -1.22535,   -0.45464,    1.02315,    0.71143,   -0.59803,   -1.19359,   -0.41006,    0.70317,    0.35665,   -0.79858,   -0.53647,
+   3.43839,    1.20394,   -2.84728,   -1.25620,    1.11030,    0.74963,   -0.45219,   -1.08916,   -0.48705,    0.89677,    0.41597,   -1.13295,   -0.54123,
+   6.29922,    1.56643,   -4.70324,   -1.21880,    1.29313,    0.77561,   -0.27214,   -1.02235,   -0.60671,    0.62004,    0.43811,   -0.68649,   -0.38170,
+  13.59902,    2.97291,   -7.69089,   -1.19724,    1.34852,    0.10822,   -0.16015,   -0.54947,   -0.66831,    0.77170,    0.77724,    0.56974,   -0.44725,
+  25.95202,    3.48014,   -9.57049,   -1.29846,    1.17726,    0.15919,   -0.36061,    0.04513,   -1.06441,    1.13393,    1.13645,    1.74329,   -0.98233,
+  35.41563,    3.74347,   -9.29239,   -1.40323,    0.30726,    0.36464,   -0.50191,    0.71997,   -1.83411,    0.12763,    0.01438,    1.52732,   -1.34014,
+  40.30910,    3.88652,   -8.24915,   -0.51924,   -0.68049,   -0.03190,   -1.15086,    0.78199,   -1.46009,   -0.44920,   -0.90312,    0.88450,   -1.12363,
+  41.16010,    4.18969,   -6.31749,    0.48709,   -0.62803,   -0.33113,   -2.02391,    0.56536,   -1.08399,   -0.61680,   -1.58262,   -0.17104,   -1.31953,
+  36.85773,    4.19310,   -4.33543,    0.52477,   -0.77216,    0.08440,   -2.05214,    0.95521,    0.29329,   -0.04903,   -2.08752,   -0.69174,   -0.55677,
+  26.98730,    3.02555,   -2.70108,    0.37827,   -0.98518,    0.42249,   -1.58129,    0.48110,    0.75456,    0.96493,   -1.02983,   -0.70425,   -0.47341,
+  14.50198,    3.08342,   -1.43540,   -0.41395,   -0.26288,    1.25355,   -0.47332,    0.79696,    1.54798,    1.79086,   -1.01742,   -1.27096,   -0.19327,
+  -0.07425,    3.51203,    0.07904,   -0.84623,    0.57152,    1.84925,    1.18436,    0.97503,    1.68712,    1.80404,   -0.87783,   -0.97168,    1.01794,
+ -10.13729,    6.59868,    0.20801,   -2.22731,    0.70834,    0.99239,    2.04797,    1.25381,    2.11591,    1.73793,   -0.78743,   -0.58378,    1.70931,
+ -11.73542,    9.55152,    0.23723,   -3.87789,    0.81732,    1.30697,    2.35846,    0.37123,    0.84172,    1.45737,    0.26195,    0.25325,    1.43181,
+ -12.08789,   10.97621,    0.84113,   -5.59512,    0.54512,    1.70936,    2.80604,   -0.04408,   -1.23794,    0.64584,    0.67573,   -0.32758,    1.31251,
+ -14.45271,   10.03827,    2.16457,   -6.36324,   -0.71855,    1.59761,    3.24396,   -0.39643,   -1.34466,   -0.12180,    0.68246,   -1.69113,    0.68083,
+ -18.53185,    6.60991,    5.65825,   -4.11071,   -0.05131,    2.15530,    3.25167,   -0.76546,   -1.56379,   -1.75907,   -0.10137,   -2.19705,    1.11219,
+ -21.35056,    3.05293,    9.36903,   -1.03400,    0.00783,    2.01184,    3.33418,    0.55118,    0.17739,   -1.68767,    0.23982,   -1.36500,    1.70118,
+ -18.56425,    2.78826,   11.28176,   -0.03004,   -1.78994,    0.82329,    2.50660,   -0.06341,    0.09623,   -1.57193,    1.14418,   -1.09023,   -0.12693,
+ -12.09316,    3.36681,   11.47227,    0.95308,   -2.17463,    0.00065,    0.84474,   -0.54763,   -0.33692,   -1.05079,    1.07330,   -0.80153,   -2.42432,
+   1.92679,    5.35752,    9.80011,   -0.02818,   -2.80453,   -0.67148,    0.68485,   -0.32753,   -1.23095,    0.20005,    0.88925,   -0.09440,   -3.41398,
+  19.79073,    8.85031,    7.22205,   -2.59490,   -3.92287,   -2.03960,   -0.02755,    0.15578,   -1.13285,    1.33972,    0.17429,    0.10092,   -4.90758,
+  35.92577,   11.48686,    5.10447,   -3.74352,   -4.61698,   -2.36022,   -1.00173,    0.19538,   -1.15090,    1.75878,   -0.54673,    0.65150,   -6.24128,
+  50.58400,   13.03088,    0.42639,   -4.54247,   -5.20881,   -3.31127,   -2.88928,    0.01554,   -1.66646,    1.81067,   -0.44956,    0.39215,   -6.96486,
+  57.90758,   10.98468,   -5.91219,   -5.56255,   -5.41425,   -4.90112,   -3.53827,    0.57130,   -0.89262,    1.92968,   -0.92637,   -0.95299,   -7.09069,
+  55.78551,    5.89209,   -9.03530,   -3.58417,   -2.52814,   -5.95845,   -3.81914,    0.76729,   -0.29327,    0.83804,   -1.38007,   -1.59041,   -3.67697,
+  46.80977,   -0.83933,   -9.70238,    0.72430,    0.84962,   -7.95729,   -4.29974,    2.43014,    1.13752,   -0.20215,   -1.03846,   -0.79795,    0.57891,
+  35.46388,   -4.72663,   -8.90094,    4.67138,    2.66211,   -9.68076,   -5.10884,    1.80854,    0.66877,   -1.86039,   -0.66617,    0.29346,    2.94240,
+  19.55447,   -8.28039,   -5.55247,    9.20015,    4.92099,  -10.90987,   -6.07571,    0.76598,   -0.32883,   -3.58855,   -1.15677,    0.78949,    3.67301,
+   7.47528,   -9.40369,   -1.98193,   12.06493,    6.37570,  -10.15639,   -5.08030,   -0.06683,   -2.30887,   -4.74815,   -2.23084,    0.41731,    4.12727,
+  -2.35128,   -8.86452,    2.02752,   14.25492,    6.66997,   -7.73172,   -2.34869,   -1.13136,   -4.27439,   -5.14893,   -2.52870,    0.22870,    3.19781,
+ -13.74784,   -8.39473,    7.58204,   16.23704,    5.20402,   -4.16565,    0.95284,   -1.97663,   -4.40917,   -4.80555,   -2.17866,    1.63009,    1.38432,
+ -28.90904,   -3.94742,   12.48695,   16.28066,    2.81784,    0.13804,    2.36518,   -3.08546,   -3.78945,   -4.59171,   -1.65770,    2.07464,   -0.09225,
+ -40.26587,   -3.22518,   14.64699,   15.65972,    0.61256,    6.07845,    2.82745,   -2.91284,   -1.93971,   -4.22240,   -1.75182,    1.26154,   -0.45747,
+ -45.14349,   -3.95551,   14.24252,   13.35371,   -0.66332,   10.23118,    2.24892,   -3.26564,   -0.24511,   -3.25066,   -1.04008,   -0.10879,   -0.11971,
+ -50.70501,   -2.20058,   13.77331,    8.13219,   -0.59765,   10.97492,    1.83205,   -2.69995,    0.97870,   -2.27968,   -0.32133,   -1.40878,    0.84365,
+ -51.82294,   -1.78247,   11.21226,    3.38317,    0.42440,   11.02288,    1.51470,   -2.02442,    2.18843,   -0.48708,    1.11811,   -0.94949,    1.98539,
+ -49.81170,   -3.51574,    7.56374,   -1.76232,    1.39623,    9.76060,    1.62028,   -0.36254,    3.84997,    1.17318,    1.67293,   -0.91917,    1.80862,
+ -44.15802,   -5.91185,    3.48952,   -5.78543,    2.65587,    8.07939,    1.97629,    1.60731,    4.55197,    2.19508,    1.27429,   -1.31118,    1.56218,
+ -35.33823,   -8.08526,    0.15498,   -7.07355,    3.57980,    6.18474,    2.92167,    2.86992,    3.89624,    2.65474,    0.84629,   -1.42729,    1.20335,
+ -24.67462,   -9.85952,   -2.72238,   -7.17991,    2.86062,    3.23167,    4.07403,    3.24118,    3.37722,    2.99099,    1.17120,   -0.22057,   -0.06190,
+ -20.59121,   -7.97999,   -4.90647,   -7.59573,    1.78547,    0.12186,    4.10282,    2.56175,    2.72723,    3.25732,    1.48873,    0.67668,   -0.26983,
+   3.07428,   -9.33687,   -7.83053,   -7.12828,   -1.75935,   -1.53312,    2.67727,    1.46145,    2.30059,    3.12121,    1.47116,    2.10630,    0.50347,
+  34.85448,  -12.44948,   -8.60693,   -5.78039,   -5.09658,   -1.93558,    0.88214,   -0.10276,    2.24627,    2.23317,    1.37225,    3.08518,    1.09415,
+  53.15696,  -15.17300,   -5.60713,   -4.74609,   -6.88502,   -2.72789,   -0.78592,   -1.64544,    1.69279,    0.83795,    0.87184,    3.41102,    1.33665,
+  63.19269,  -18.34933,   -2.68293,   -1.60527,   -5.52794,   -2.98085,   -2.15557,   -1.58699,    2.18150,   -0.01729,    0.79118,    4.58937,    2.20473,
+  65.03039,  -20.59490,   -2.70460,    0.33127,   -4.29067,   -3.65004,   -3.10802,   -0.79441,    2.44594,   -0.59979,    0.76763,    4.64011,    2.38635,
+  58.15821,  -18.48174,   -4.17419,    1.99928,   -3.14752,   -4.23100,   -3.76360,    0.39203,    2.04124,   -0.63182,    1.11693,    3.96532,    2.29718,
+  43.36550,  -12.39132,   -5.47534,    3.43893,   -0.83615,   -3.45579,   -4.07687,    1.14879,    1.74518,   -0.71960,    1.67511,    3.23933,    2.07236,
+  22.79780,   -1.72378,   -3.69967,    4.69732,    2.31691,   -2.70338,   -3.56008,    2.56122,    0.69024,   -1.13102,    1.02780,    1.32513,    0.84953,
+  -2.38226,   11.01462,   -1.65994,    4.38537,    5.09022,   -2.44627,   -3.58302,    2.97971,   -0.93223,   -1.13723,   -0.36051,   -1.66610,   -0.48844,
+  -9.15842,   20.82118,   -2.51988,    1.32690,    4.36041,   -3.41942,   -4.40026,    1.89496,   -2.22513,   -0.65096,   -2.51034,   -3.45496,   -2.17831,
+  -4.90597,   28.78299,   -1.34937,   -2.74360,    3.06130,   -3.39814,   -4.09993,   -0.45970,   -2.64830,    0.34129,   -2.67252,   -4.07549,   -3.01268,
+   1.74398,   30.06878,   -0.49164,   -6.57672,    1.29566,   -1.78235,   -3.59079,   -2.52400,   -1.89671,    0.78753,   -2.28603,   -4.38701,   -3.96275,
+   7.34338,   24.49593,    1.15951,   -8.19852,    0.27615,   -0.33721,   -3.73858,   -3.92873,   -0.98650,    0.85332,   -2.07667,   -3.93494,   -3.94029,
+  10.45519,   16.79375,    1.48509,   -8.80962,   -1.72076,    0.61703,   -3.47419,   -5.23513,   -0.78920,    1.18348,   -1.49720,   -3.53585,   -3.28705,
+  11.18580,   10.93569,    0.14163,   -8.22567,   -3.87385,    0.79017,   -2.48016,   -6.75811,   -1.56071,    1.77140,   -0.15130,   -2.39039,   -2.57756,
+   8.62314,    5.68633,   -2.59710,   -7.38330,   -6.12817,    1.58600,   -0.15663,   -7.56769,   -1.31918,    2.21425,    1.82699,   -0.07501,   -1.56627,
+   4.85575,    3.24181,   -2.64459,   -5.65921,   -7.05089,    2.54483,    2.62323,   -6.91991,   -2.33872,    2.04650,    2.49293,    1.78403,   -1.23099,
+   2.03701,    3.38577,   -1.08578,   -3.92688,   -7.34940,    2.89644,    4.55796,   -6.29122,   -3.56426,    1.25358,    1.90528,    2.64444,   -0.91498,
+  -0.80505,    3.42489,    1.32204,   -2.31205,   -7.14935,    2.82134,    5.29194,   -5.34071,   -4.25586,    0.13365,   -0.09624,    2.46993,   -1.29939,
+  -2.81361,    3.51945,    4.71937,   -0.32877,   -5.78644,    2.50150,    5.30680,   -4.09004,   -4.64764,    0.12808,   -0.99124,    2.73573,   -1.09987,
+  -2.27606,    2.49080,    4.82849,    0.98120,   -3.68185,    2.29999,    4.26559,   -2.39404,   -4.11005,    0.06532,   -1.48285,    2.32967,   -1.51072,
+  -0.89890,   -0.35663,    4.83242,    2.02812,   -0.00512,    0.71252,    2.83080,   -0.13655,   -2.87700,   -0.21353,   -2.20828,    0.14064,   -1.50410,
+  -1.32203,   -3.28788,    4.86801,    3.59329,    3.33925,   -0.65978,    1.17374,    2.83331,   -1.87445,   -0.51870,   -2.48739,   -2.75273,   -0.93531,
+  -2.95602,   -4.93940,    4.74206,    6.03304,    6.66710,   -2.46849,   -0.35298,    5.41546,   -1.11451,   -1.58884,   -2.44900,   -4.84549,   -0.13159,
+  -5.67693,   -6.20691,    4.26896,    8.38822,    8.77156,   -4.73230,   -0.31846,    7.51461,   -0.41656,   -2.93236,   -1.65841,   -5.44101,    0.14973,
+  -9.32123,   -6.92015,    3.96077,    9.64051,    8.42496,   -5.98046,    0.60400,    8.73328,   -1.11180,   -4.11314,   -0.57883,   -5.09402,    0.24460,
+ -14.18911,   -7.83175,    4.75672,   10.62509,    7.19545,   -5.97903,    1.25377,    8.04866,   -1.43700,   -5.43428,    0.82436,   -4.04638,    0.94682,
+ -20.77600,   -8.30645,    7.26155,   11.64701,    5.45212,   -3.94525,    2.49760,    6.01494,   -1.69385,   -6.71214,    1.11581,   -2.55983,    1.50917,
+ -25.60518,   -8.60620,   10.57376,   11.97685,    3.51226,   -0.93216,    2.64148,    3.62231,   -1.49824,   -6.03131,    1.73962,   -0.48087,    1.12084,
+ -28.48710,   -8.54384,   11.43639,   10.75773,    1.08724,    2.34973,    2.50412,    0.15479,   -1.55349,   -4.62047,    1.82277,    1.66091,    0.56405,
+ -26.47807,   -7.74785,   10.66816,    8.20806,    0.76822,    4.70722,    3.37597,   -2.07676,   -0.75210,   -2.27726,    1.56973,    1.90455,    0.40052,
+ -20.38836,   -6.68279,    7.89335,    4.70084,    1.09125,    6.03361,    3.52933,   -2.70703,    0.57110,   -0.01144,    0.67969,    0.64763,   -0.18611,
+ -11.98435,   -4.37924,    3.71503,    2.29466,    1.19429,    4.08454,    2.16590,   -1.53341,    1.50450,    0.67063,   -0.69825,   -0.57534,   -0.57711,
+  -2.73807,   -1.20215,   -1.02834,    0.62445,    2.48680,    0.76224,    0.46599,    0.57219,    1.49698,   -0.24021,   -1.26501,   -0.70107,   -1.25617,
+   6.19413,    2.61517,   -6.91054,   -2.30359,    2.99197,   -2.33490,   -0.73984,    2.60685,    0.72150,   -1.79121,   -1.96885,   -0.67395,   -2.11103,
+  13.52636,    6.68200,  -11.03947,   -4.94872,    3.56298,   -4.77247,   -2.25040,    3.99700,    0.30644,   -1.53599,   -1.90471,   -0.24907,   -1.02465,
+  16.04592,   10.07306,  -12.51600,   -6.40530,    4.13849,   -4.61784,   -2.48754,    3.82767,   -0.50564,   -0.49293,   -2.65747,   -0.36906,    0.32286,
+  14.35237,   12.35609,  -11.84781,   -8.20980,    3.57165,   -2.36192,   -2.35247,    1.85376,   -1.43877,    1.02273,   -2.58947,   -1.12470,   -0.46274,
+   9.73970,   13.40854,   -9.21540,   -9.77718,    2.49565,    0.57999,   -0.93151,   -1.07202,   -2.64612,    2.08634,   -1.41460,   -0.62353,   -1.10912,
+   3.49330,   12.62119,   -4.85997,  -10.79564,    1.18898,    3.79537,    1.69124,   -3.31593,   -4.18121,    2.61258,   -0.39545,    0.24283,    0.03696,
+  -1.84747,   10.77017,    0.16821,  -11.92192,   -0.90363,    5.84449,    4.37664,   -4.18188,   -4.24437,    3.07181,   -0.42259,    0.11295,    1.16443,
+  -6.69855,    8.96735,    4.33632,  -10.70571,   -3.98391,    5.18672,    5.66782,   -3.40608,   -3.23547,    2.49260,   -0.94673,   -0.80631,    1.93462,
+ -11.40426,    7.45498,    7.96113,   -7.77649,   -6.53836,    3.37039,    6.13666,   -1.71124,   -1.95125,    0.44907,   -0.72452,   -0.69973,    2.27975,
+ -14.27764,    6.18340,    9.30103,   -4.99569,   -7.44876,    1.11961,    5.27760,    0.04002,   -0.09942,   -2.06586,   -0.54380,   -1.06767,    0.98289,
+ -15.09284,    5.75743,   10.08707,   -2.46157,   -7.77299,   -1.47011,    2.54240,    2.66415,    2.96622,   -2.81969,   -0.75924,   -0.36479,    0.75600,
+ -15.33435,    5.41592,    9.02124,    0.11162,   -6.05019,   -2.12545,   -0.31740,    4.02868,    4.68107,   -2.18970,   -1.29622,    0.36290,    0.98462,
+ -15.05021,    4.39790,    5.51100,    1.43249,   -3.43379,   -1.64553,   -2.34462,    3.77202,    6.13566,   -0.73397,   -1.83753,   -0.46790,   -0.02438,
+ -13.72513,    3.29787,   -0.04745,    2.07437,   -0.57272,   -0.02380,   -3.58655,    2.28870,    5.93155,    0.45545,   -0.58019,   -0.40121,   -0.99625,
+ -12.22198,    1.42758,   -5.25288,    1.90606,    2.39094,    2.75039,   -2.96300,    1.50009,    4.81733,    1.07018,    0.06479,    0.06655,   -0.98787,
+ -10.01205,   -0.37804,   -8.18648,    1.17609,    4.23761,    4.32519,   -1.64548,    0.76116,    3.26145,    1.87996,    0.50699,    1.20348,   -0.10082,
+  -8.75645,   -2.85586,  -10.75044,    0.57903,    5.52579,    5.70370,   -0.84751,   -0.24039,    1.66081,    2.39009,    1.33372,    2.56615,    1.76790,
+  -9.91855,   -5.40492,  -11.52912,    0.35217,    5.71640,    6.60860,    0.23005,   -1.26582,    0.18645,    2.18125,    2.74341,    3.40672,    1.84320,
+ -11.42108,   -8.08009,  -11.00284,    0.44441,    5.36715,    5.07123,   -0.97200,   -3.22489,   -0.54376,    2.61974,    3.56344,    1.52315,    0.84333,
+ -12.73294,   -9.61740,   -7.34149,    2.65644,    4.32692,    2.20847,   -2.72633,   -2.86079,    0.68873,    2.76868,    3.37408,    0.37999,    0.40724,
+ -13.84710,  -10.25110,   -2.25508,    5.39586,    2.95018,   -1.31748,   -4.62441,   -2.63310,   -0.17137,    0.86537,    2.27429,    0.08884,   -0.15712,
+ -14.84188,  -10.75873,    2.32538,    6.62349,    1.49554,   -4.92618,   -5.65451,   -2.16402,   -0.99870,   -0.18570,    1.54716,   -0.15523,   -1.03129,
+ -14.62582,  -10.17378,    4.48427,    7.53285,    0.05729,   -7.41222,   -6.38270,   -2.24648,   -1.87079,   -1.29221,    1.41200,   -0.04917,   -1.12492,
+ -11.98472,   -8.68415,    4.76730,    7.00370,   -1.35307,   -8.77491,   -5.33223,   -0.76328,   -1.94678,   -2.01072,    0.87653,   -0.80704,   -1.56335,
+  -6.68833,   -5.61523,    4.94088,    6.02469,   -3.40081,  -10.02078,   -3.68155,    0.56931,   -1.93419,   -2.55908,    0.42494,   -1.11725,   -1.20990,
+   2.81004,   -2.02520,    4.70157,    5.01186,   -3.81974,   -8.93846,   -1.26274,    0.67761,   -2.15123,   -2.31932,    0.14151,   -0.30736,   -0.77184,
+  15.24441,    3.16399,    3.06654,    3.39242,   -2.77780,   -6.01506,    1.52849,    0.88032,   -2.74911,   -1.95009,    1.27164,    0.70505,   -1.87823,
+  27.13629,    6.04094,    0.94129,    1.37315,   -2.23185,   -4.18834,    2.30917,   -0.34200,   -2.37557,   -0.96387,    1.79692,    0.01351,   -3.13576,
+  36.91251,    7.70456,   -0.79154,    1.48468,   -1.34173,   -2.89329,    2.08928,   -0.31521,   -1.40975,    0.63808,    1.15078,   -0.36513,   -2.05920,
+  41.80196,    6.59528,   -1.48675,    2.87300,   -0.67878,   -2.79773,    1.21568,   -0.70548,   -1.32912,    0.45099,    0.36933,   -0.66049,   -0.57649,
+  42.39352,    3.75095,   -1.67266,    3.02999,   -0.70875,   -1.71865,    0.77996,   -0.28122,   -1.43765,   -0.34925,   -0.20487,   -0.26833,   -0.61056,
+  38.32924,    0.68278,   -2.31969,    2.84187,   -0.21468,   -0.83297,   -0.13737,    0.29246,   -1.01017,   -0.51026,   -0.25605,    1.08342,   -0.19962,
+  28.71867,   -0.65356,   -2.46608,    2.81041,   -0.60524,   -0.96247,   -0.99773,    1.74791,    0.02007,   -0.41452,   -0.49988,    2.43381,   -0.14864,
+  10.69030,   -0.37141,   -1.52464,    2.49809,   -1.96297,   -0.80192,   -1.19818,    3.21631,    2.74220,   -0.49875,   -0.38280,    2.44637,    0.95079,
+  -4.21305,   -0.19767,   -2.05400,    3.22323,   -1.19091,    2.63238,   -0.57677,    1.48780,    4.00736,   -1.07241,   -2.34252,    2.89817,    1.96701,
+  -4.45865,    0.02820,   -3.71148,    2.15166,   -0.41589,    5.15389,   -0.32820,    0.98058,    4.91290,   -0.80492,   -2.81405,    3.91624,    2.47771,
+  -1.99250,    0.79772,   -2.89936,    0.71907,   -0.35009,    5.05768,   -0.36603,    0.89149,    5.55362,   -0.60737,   -2.15034,    3.59069,    1.76190,
+   0.64822,    2.79693,   -3.79425,   -0.58293,   -0.31688,    3.59744,   -0.47969,    0.95906,    3.68599,    0.25640,   -1.84443,    2.73738,    1.88444,
+   3.28015,    2.27138,   -4.71726,   -1.90106,    0.02296,    1.33422,   -0.94024,   -0.19434,    1.15872,    1.37669,   -1.23947,    1.17146,    2.57427,
+   9.37088,    1.06541,   -5.01879,   -4.62926,    0.43655,   -0.45519,   -1.95563,   -0.39005,   -1.49931,    1.56708,   -1.05569,   -0.54155,    2.12925,
+  16.96761,   -0.90822,   -5.84950,   -6.81510,    0.50724,   -2.14934,   -3.56798,   -0.75367,   -3.51565,    1.38109,   -0.25178,   -1.43665,    1.40476,
+  21.50877,   -1.69229,   -4.89472,   -7.69609,   -0.54430,   -4.54768,   -5.23466,   -0.75794,   -4.32952,    1.02574,    0.03743,   -1.93897,    0.99259,
+  17.27320,   -0.13019,   -2.49389,   -8.23005,   -1.82934,   -5.20975,   -5.30269,   -0.40506,   -2.55345,    1.39355,    0.78113,   -3.33895,    1.27909,
+   9.05269,    1.69877,   -2.70827,   -7.29207,   -0.80949,   -1.77445,   -4.22542,   -2.64609,   -1.22710,    1.36135,   -0.71990,   -3.35986,    2.12678,
+   7.36781,    1.17238,   -2.84311,   -6.44386,   -0.70203,    1.53942,   -2.44470,   -4.07329,    0.88151,    1.66190,   -1.48027,   -2.23438,    3.09518,
+   4.75160,   -2.04617,    1.36224,   -4.19334,   -2.04990,    3.63767,   -1.47533,   -4.89096,    3.27745,    1.88681,   -1.29670,   -2.59379,    2.67214,
+   1.94100,   -7.31669,    6.28434,   -1.98404,   -3.42142,    5.51068,   -0.86725,   -4.04228,    4.28021,    1.90879,   -0.30868,   -2.44035,    1.25195,
+  -1.05833,  -13.52023,   11.86317,    0.57902,   -3.02504,    8.18123,    0.93464,   -3.38367,    4.89950,    2.45062,    0.39102,   -1.66330,    0.19091,
+  -2.49104,  -18.57310,   16.17573,    1.79826,   -1.45206,    9.74734,    1.72970,   -2.56075,    4.12025,    2.68600,    0.63560,   -0.79943,   -0.06874,
+  -2.59974,  -23.43774,   16.58014,    1.67555,   -0.74508,    9.68651,    2.50153,    0.24997,    4.18836,    1.83014,    0.56718,   -0.79837,   -1.19192,
+  -1.49893,  -26.44437,   16.21926,    2.22600,   -0.49411,    8.23197,    2.48956,    2.55461,    4.43638,    0.67917,    0.51508,    0.02263,   -1.07525,
+   0.17367,  -27.45265,   13.55627,    1.32274,   -0.18477,    6.72377,    2.53765,    3.99385,    4.53051,    0.44450,    0.80651,    0.69672,   -0.46945,
+   0.04410,  -24.98162,    8.70683,    0.25714,    0.56338,    5.31583,    2.75320,    4.54420,    4.71225,    0.21563,    0.73014,    1.51762,   -0.42208,
+  -3.28887,  -18.07655,    2.45797,   -0.15476,    0.96318,    2.42861,    3.50636,    3.80064,    3.69052,   -0.23977,    0.16358,    1.33120,   -0.38106,
+  -9.19778,  -10.13179,   -1.92130,    1.50227,    1.27064,    0.86660,    4.24795,    2.30520,    2.57223,    0.13229,    0.53934,    1.08597,   -0.95556,
+ -13.00829,   -0.81566,   -5.66277,    1.93589,    3.78529,    0.93587,    2.53763,    0.98734,    0.55445,   -0.94121,    0.56234,    0.27065,   -1.77174,
+ -18.76268,   11.84327,   -7.87458,    2.24731,    6.62282,    0.69497,    0.74281,   -1.08260,   -1.26590,   -1.79175,    1.22720,    0.14458,   -2.47265,
+ -25.99911,   24.02445,   -7.24912,    2.09671,    7.14326,   -0.18926,   -1.15170,   -3.18595,   -5.27186,   -2.12673,    2.03488,    0.60253,   -2.13838,
+ -31.28606,   31.60418,   -7.03757,    1.48232,    7.48913,   -2.45022,   -1.44005,   -2.90936,   -7.53694,   -2.10945,    2.40383,   -0.14134,   -2.40903,
+ -31.94234,   35.32645,   -6.11949,    0.83866,    7.35700,   -4.76190,   -1.40555,   -1.92809,   -8.52276,   -1.93904,    2.40511,    0.09211,   -1.40625,
+ -28.75794,   34.47496,   -4.98783,   -0.16244,    6.55911,   -6.28123,   -1.09982,   -0.52298,   -8.61766,   -1.30856,    2.01304,    0.09461,   -0.08601,
+ -22.37448,   30.00859,   -3.71303,   -1.16607,    5.12686,   -6.68807,   -0.77428,    0.78317,   -7.70137,   -0.47778,    1.83486,    0.38777,    0.36698,
+ -15.51739,   22.34031,   -2.45110,   -1.67907,    2.42308,   -7.06228,   -0.00384,    1.56840,   -6.59758,   -0.12398,    0.54929,   -0.50547,    0.43699,
+  -8.89583,   12.31876,   -0.03110,   -0.47453,   -0.34662,   -6.05444,    1.16067,    2.17346,   -4.67122,    1.67708,    0.31698,   -0.40014,    0.26673,
+  -1.26582,    4.44218,    0.64815,   -0.45421,   -0.17520,   -3.35307,    1.11054,    1.79210,   -2.85642,    1.33460,   -0.91608,   -1.23376,    0.36333,
+   2.70118,    0.71353,    0.01867,   -0.82814,    0.83380,   -1.81299,   -0.24275,    0.40755,   -0.50411,    0.53273,   -1.06812,   -1.10424,    0.29433,
+   4.74178,   -2.34778,    0.06226,   -0.96192,   -2.02295,    0.66886,   -1.19536,   -0.69350,    1.14850,   -0.70856,   -1.35563,    0.13132,    0.38962,
+   7.71573,   -2.73441,   -0.39019,   -1.05507,   -4.26551,    1.49125,   -1.56280,   -1.93073,    2.51715,   -0.93571,   -0.81998,    0.89451,   -0.03053,
+  17.80878,   -1.92802,   -3.36182,   -3.93137,   -7.20354,    2.14663,   -1.99473,   -2.71323,    3.99794,   -0.36306,   -0.47921,    1.05937,   -0.24241,
+  26.55011,   -1.69229,   -7.26775,   -7.14862,   -8.89414,    1.35240,   -1.34739,   -3.56304,    4.77341,    0.29911,    0.31556,    1.23076,   -0.70165,
+  32.91570,   -2.08066,  -11.59069,   -9.47287,   -9.53222,    0.20750,    0.37125,   -4.39626,    4.81612,    2.05855,    1.25449,    2.06052,   -1.68883,
+  35.40212,   -2.26355,  -14.47091,  -10.66311,   -9.46641,   -0.53875,    2.96014,   -4.37613,    4.12767,    3.24393,    0.83255,    2.11693,   -2.09055,
+  35.30202,   -0.97347,  -16.03527,  -10.94610,   -9.24308,   -1.59276,    4.52284,   -3.65974,    2.70928,    4.61573,    0.38373,    2.56052,   -2.26927,
+  30.86695,    1.05409,  -14.93660,  -10.22094,   -9.13342,   -2.48704,    5.11750,   -2.66507,    1.83895,    5.30424,    0.05566,    2.63695,   -2.60124,
+  22.42946,    1.53519,  -11.75891,   -8.88198,   -7.90977,   -3.31742,    4.48760,   -2.26071,    0.96900,    5.55024,    0.30507,    3.01267,   -2.36594,
+  13.52437,   -1.77012,   -8.01396,   -5.71224,   -7.08577,   -1.67191,    4.18823,   -1.41550,    1.25624,    4.19230,    0.24781,    3.83969,   -2.03469,
+   6.29797,   -2.94556,   -3.31295,   -2.05268,   -4.96755,    0.37324,    4.42386,   -0.47072,    1.38078,    2.33410,    0.60536,    4.04449,   -1.66715,
+   7.03388,   -1.88699,   -0.89831,   -0.72831,   -3.98922,    2.31390,    2.84756,    0.91529,    1.53131,    0.41264,    0.75684,    2.66646,   -1.58468,
+   8.59103,   -0.75111,   -0.43501,   -1.03709,   -2.40834,    2.40848,    0.64505,    1.49241,    1.49464,   -1.05527,    1.36277,    0.88936,   -1.70518,
+   9.02918,   -0.30275,   -0.93235,   -0.50864,   -0.00630,    1.64730,   -1.84104,    1.71915,    1.81658,   -0.94907,    2.29689,   -0.44372,   -2.23498,
+   8.03955,    0.84996,   -1.02862,    1.12941,    2.55989,    1.00710,   -3.25440,    2.13028,    1.70638,   -1.34126,    1.69704,   -2.57947,   -1.14129,
+   3.92637,    4.26742,   -1.43864,    3.36519,    4.77079,    0.20147,   -4.80717,    1.80189,    0.52371,   -2.19936,    2.02969,   -4.02197,    1.19553,
+  -9.75186,    7.95319,    1.89195,    6.87313,    7.24508,    0.38969,   -4.94622,    2.20894,    0.13242,   -1.80711,    2.24500,   -4.14295,    3.29014,
+ -25.74727,   10.56297,    6.98925,    8.53384,    8.44463,   -0.16862,   -5.69018,    2.25549,    0.03712,   -0.81912,    1.86413,   -3.81732,    4.50172,
+ -39.38715,    9.53511,   10.49757,    8.89239,    8.93848,    0.04543,   -5.49953,    2.53636,    0.02893,   -0.96121,    1.03970,   -2.43473,    5.16357,
+ -50.86744,    5.08216,   14.87293,    8.96620,    8.74975,    2.25109,   -2.36884,    2.40114,    0.27089,    0.14961,    0.52958,   -1.54921,    4.95654,
+ -58.32141,   -1.38571,   17.60396,    8.66702,    7.29410,    2.72607,    0.21255,    2.49070,    0.35523,    0.19951,   -0.24277,   -0.63393,    4.78122,
+ -58.97495,   -7.18837,   17.04962,    6.17018,    4.73773,    2.75655,    3.24712,    2.91209,   -0.07952,   -1.17464,   -2.15621,   -0.31728,    3.84731,
+ -53.48480,  -12.16156,   14.36180,    3.41863,    2.32079,    2.62316,    5.10355,    3.95882,    0.16645,   -1.69142,   -3.53495,    0.29989,    2.53144,
+ -44.41793,  -17.75593,   10.04019,    2.15446,    0.66357,    2.61836,    5.61286,    4.24551,    0.40771,   -1.43346,   -4.32993,    0.17271,    1.08880,
+ -32.49360,  -20.53106,    3.83417,    1.33373,   -0.75217,    2.92861,    5.26812,    3.42742,    0.34384,   -1.50851,   -3.15650,   -0.04079,    0.94127,
+ -25.40067,  -21.01723,   -0.83224,    1.31692,   -0.78758,    3.13710,    5.08402,    2.37012,    0.02091,   -1.24625,   -1.78009,    0.05981,    1.12072,
+ -19.91983,  -18.48783,   -3.51829,    0.58294,   -0.29967,    2.00412,    3.52609,    1.75950,    0.22128,   -0.11042,   -0.35183,    0.31160,    1.07019,
+ -13.91170,  -14.94708,   -5.91401,   -0.55662,    0.44003,    0.86219,    1.72723,    1.50295,    0.40767,   -0.00982,    0.65065,    1.28056,    1.03996,
+  -4.63321,   -9.80123,   -7.14623,    0.09051,    2.13378,   -0.11032,   -1.33640,    0.21459,    0.93396,    1.34634,    2.63592,    1.67171,   -0.02679,
+  21.24445,   -4.62768,   -7.42309,    0.91250,    2.11867,   -1.97277,   -4.06952,    0.10994,    1.92382,    1.49673,    2.75147,    1.67524,   -0.69527,
+  43.62434,    1.22208,   -7.76417,   -0.23891,    0.78057,   -2.94695,   -4.60009,    0.11306,    1.76363,    0.24628,    1.19909,    1.69104,   -1.04623,
+  56.04261,    7.20722,   -8.21888,   -2.89665,   -1.99220,   -3.58552,   -2.67945,    0.90029,    1.54129,   -0.23016,   -0.35601,    2.21120,   -0.76100,
+  58.20672,    9.91913,   -6.27579,   -4.32141,   -4.46382,   -4.31170,   -0.54358,    0.68694,    0.73825,    0.89305,   -1.00545,    2.85541,   -0.52620,
+  54.42525,   11.74144,   -2.81250,   -4.47458,   -5.34716,   -4.12288,    1.39709,   -0.72710,   -1.16623,    2.54309,   -2.55042,    2.44672,   -1.00524,
+  44.90937,   14.39153,    2.49791,   -4.05870,   -7.44950,   -4.38335,    2.39249,   -2.48273,   -3.55761,    3.12409,   -4.36542,    2.07686,   -1.46756,
+  29.78906,   17.28297,    7.11228,   -3.67632,  -10.60783,   -5.43260,    3.32754,   -2.87728,   -4.27145,    4.27696,   -5.32469,    0.24385,   -1.89313,
+   8.23970,   16.90738,    9.96211,   -2.83619,  -13.45532,   -5.24715,    4.28713,   -2.86439,   -4.64102,    4.91937,   -5.47223,   -1.09461,   -1.38145,
+ -10.62254,   15.58126,   11.06381,    0.09486,  -13.33739,   -5.31992,    2.18137,   -3.60659,   -5.12573,    4.40507,   -4.65969,   -1.08478,   -1.16525,
+  -7.07778,   15.01011,    9.46250,    1.74451,  -12.55043,   -5.24038,   -0.16576,   -2.61809,   -3.76376,    2.58781,   -5.10532,   -1.42833,   -1.89937,
+  -0.06750,   13.94813,    5.54204,    1.12110,  -10.18889,   -4.28095,   -1.42396,   -0.97186,   -1.76278,   -0.27610,   -4.38760,   -1.66195,   -2.60128,
+   3.82889,   12.16779,   -0.54229,   -2.07004,   -6.23017,   -2.32188,   -1.18671,    1.19706,    0.06024,   -4.47695,   -2.15927,   -0.72798,   -2.52633,
+   6.81479,    8.40311,   -6.48447,   -5.94777,   -1.04667,    0.17513,   -0.01482,    1.86478,    0.85430,   -8.31264,    1.68402,    0.95341,   -1.03125,
+   9.82745,    3.55038,  -10.67489,   -8.01563,    6.79512,    2.96815,   -0.11098,    0.09600,   -0.52539,   -9.69904,    5.21271,    1.82310,   -0.39903,
+  11.45684,    0.03036,  -11.73270,   -8.08089,   13.33979,    4.47437,   -2.38381,   -2.60420,   -2.09416,  -10.26572,    7.00157,    2.26133,   -0.18476,
+  11.62225,   -1.50441,  -10.69765,   -6.47235,   17.22651,    4.44808,   -5.70378,   -3.58756,   -1.06231,   -9.22693,    7.28444,    0.49821,   -0.56244,
+   9.23613,   -3.31189,   -8.60070,   -2.32909,   18.41258,    3.72011,   -8.72706,   -3.12103,    0.62269,   -7.85225,    6.69340,   -0.94240,   -0.20668,
+   5.98558,   -7.52313,   -4.65974,    3.67510,   16.53122,    2.75163,  -10.03322,   -2.52747,    1.41834,   -6.69265,    4.89216,   -1.46378,   -0.41327,
+   2.99743,  -11.12050,   -0.02822,    9.29017,   12.12208,    1.42700,   -9.37573,   -0.91877,    2.59866,   -4.37121,    1.41160,   -1.46756,   -1.46802,
+  -0.06049,  -13.06933,    4.45271,   13.28728,    7.14645,   -0.50260,   -6.44046,    0.92776,    3.48747,   -0.93990,   -1.26318,   -1.45002,   -2.88334,
+  -3.36130,  -13.31747,    8.24255,   14.94259,    3.26655,   -1.71282,   -3.13191,    3.31142,    3.51931,    0.74346,   -2.94420,   -1.02752,   -3.05788,
+  -4.43719,  -12.11914,    9.68708,   13.55916,    0.85073,   -1.94101,    0.52049,    5.09686,    2.81389,    0.89194,   -3.52238,   -0.75242,   -2.35841,
+  -3.38847,  -10.87211,    9.54265,   10.87654,    0.06512,    0.08463,    3.56519,    5.39448,    1.59345,    1.27442,   -2.89335,   -0.53296,   -0.53830,
+  -1.60134,  -11.19891,    8.66631,    7.12740,   -0.79799,    1.83884,    4.41223,    3.83193,    0.05234,    2.93964,   -2.00153,   -1.56419,    1.16289,
+   1.45920,  -13.75778,    9.37214,    4.71567,   -1.18614,    4.61565,    3.94317,    2.52049,    0.06108,    4.27000,   -1.90385,   -2.71746,    2.19313,
+   2.65486,  -16.41823,   10.58618,    2.70309,   -1.48818,    6.06531,    2.58464,    1.43941,    1.76507,    4.55765,   -0.49548,   -2.66073,    3.89439,
+   3.64922,  -19.56205,   10.30426,    1.67804,   -1.72896,    7.09416,    1.80202,    0.74372,    4.14972,    4.21858,    0.09398,   -2.99403,    4.15199,
+   3.91389,  -20.88485,    9.22920,    0.27776,   -2.00880,    8.27171,    1.99566,    0.73880,    5.41566,    4.41979,    0.53800,   -2.62236,    3.69295,
+   3.76984,  -19.47481,    7.89668,   -0.69767,   -2.18589,    8.10510,    2.76700,   -0.13585,    5.14756,    4.00312,    0.44569,   -1.36767,    2.53594,
+   3.10047,  -14.73316,    6.09035,   -1.17428,   -2.34116,    7.01329,    2.11292,   -2.09845,    4.14803,    3.43358,    0.47169,   -0.41870,    1.53394,
+   1.32047,   -5.86912,    3.12136,   -2.06694,   -1.77074,    4.78289,    0.61932,   -3.25998,    1.72100,    2.43805,    0.95504,   -0.09067,    0.12897,
+  -1.72635,    5.67588,   -1.76008,   -2.73056,   -1.28893,    3.15430,   -0.85754,   -4.01433,    0.01372,    0.84282,    0.09917,    0.08895,   -0.97511,
+  -4.95503,   16.46264,   -7.80901,   -4.00959,   -0.60269,    1.51427,   -2.40550,   -4.84695,   -1.42961,   -0.93863,   -0.58828,   -0.70918,   -1.93054,
+  -6.15577,   22.72925,  -10.73279,   -4.47260,    0.40074,   -0.37692,   -2.86899,   -4.86325,   -2.55904,   -3.26603,   -1.51787,   -0.57631,   -2.52619,
+  -9.36204,   26.66463,  -11.54406,   -3.67332,   -0.25116,   -2.24198,   -2.49954,   -5.16732,   -2.33882,   -4.74062,   -0.31173,    0.30372,   -2.98273,
+ -15.20423,   27.54249,  -10.61071,   -2.02813,    0.13618,   -3.09263,   -1.77672,   -3.85209,   -1.32558,   -5.08445,    0.55094,    0.01271,   -3.67274,
+ -23.51772,   25.65288,   -7.31953,   -1.10315,    2.90398,   -2.94630,   -0.13000,   -2.15893,   -2.95290,   -3.98030,    1.45540,    0.57831,   -2.43257,
+ -28.91310,   21.67089,   -3.58740,   -0.85989,    5.18609,   -1.68520,    2.23006,   -0.35752,   -4.18589,   -2.62156,    1.11387,    0.63374,   -0.88875,
+ -30.20216,   16.56558,    0.77764,    0.06467,    6.58043,   -0.39967,    3.20560,    0.44316,   -4.32143,   -0.62463,    0.73178,    0.42263,    0.79977,
+ -28.42687,   11.56648,    4.35270,    1.11830,    7.46077,    0.46229,    3.03668,    1.41300,   -4.74689,    1.08841,    1.09717,    0.03241,    2.11287,
+ -25.08270,    7.77350,    6.18683,    1.80671,    6.93339,    1.23991,    2.73674,    2.13388,   -4.81290,    2.93157,    0.96031,    0.00314,    2.61138,
+ -20.78136,    4.67761,    5.75201,    2.12302,    6.13397,    2.10596,    2.66419,    2.75081,   -3.90445,    3.70042,    1.10107,   -0.22610,    2.31713,
+ -13.46454,    1.02092,    3.93720,    1.57585,    6.20745,    1.51712,    2.82467,    3.39257,   -2.72285,    2.73447,    1.37315,    0.52436,    2.78130,
+  -5.76264,   -1.22873,    0.79626,    1.82444,    4.56921,    0.58728,    2.43814,    2.92308,   -0.52706,    2.21018,    1.86391,    1.17943,    2.24987,
+  -1.44083,   -2.31218,   -1.35512,    2.11496,    2.26353,   -0.14471,    1.07082,    2.83232,    2.00566,    2.38910,    2.27280,    1.26996,    1.32193,
+  -1.87221,   -2.92470,   -1.52263,    2.08718,    1.56698,   -0.51738,    0.41417,    2.34894,    1.81023,    1.79143,    2.48958,    1.86194,    0.95187,
+  -4.23272,   -3.52911,   -0.05163,    1.94083,    0.61919,    0.29370,    1.25501,    1.97348,    1.63943,    1.14314,    0.83332,    0.41114,    0.57006,
+  -7.54358,   -4.47028,    2.50155,    2.29922,   -0.76621,    1.00946,    2.36928,    1.47940,    1.26880,    1.23042,   -0.04485,   -0.54338,    0.40080,
+  -9.74334,   -5.52010,    3.99529,    3.12108,   -1.07245,    0.98784,    2.51061,    1.22118,    1.20767,    0.96915,   -1.01663,   -1.06841,    0.74825,
+  -4.26644,   -4.94790,    0.22937,    2.41042,   -3.15696,   -1.26886,    2.63047,    1.78882,    1.50777,    0.61947,   -1.39230,   -0.64064,    1.38530,
+  -0.92275,   -4.66528,   -2.25085,    1.46434,   -5.28919,   -3.62174,    2.49501,    2.33147,    1.96532,    0.82141,   -1.07020,   -0.19027,    1.43869,
+  -0.61408,   -5.15320,   -4.32445,    0.30326,   -5.09984,   -4.40307,    2.45182,    3.52640,    2.65756,    1.44538,   -0.63919,   -0.67366,    0.87614,
+  -1.43957,   -6.30323,   -6.62573,   -1.59314,   -4.56871,   -4.83830,    1.63700,    4.15749,    3.51661,    1.85555,   -0.18287,   -1.35576,   -0.06179,
+  -4.81377,   -8.84991,   -7.56502,   -2.87500,   -3.74041,   -4.91132,   -0.43869,    3.64460,    4.35858,    2.57094,   -0.17287,   -1.19331,    0.19920,
+  -8.93782,  -10.90637,   -7.45249,   -4.14376,   -2.78710,   -3.29015,   -1.35468,    2.46828,    4.15115,    2.92140,    0.46725,   -0.46433,   -0.04547,
+ -14.91245,  -13.63623,   -6.11492,   -5.25653,   -2.83475,   -1.87762,   -2.07324,    0.68378,    2.78237,    1.89539,   -0.17822,   -0.57172,   -0.24379,
+ -21.65250,  -15.86699,   -2.63951,   -5.38553,   -2.94295,    0.49789,   -1.32799,   -0.76350,    1.16474,    1.19402,   -0.06909,   -0.10227,   -0.50733,
+ -27.78066,  -17.66521,    1.29509,   -4.41328,   -2.16524,    2.78819,   -0.76389,   -2.12583,   -0.34769,    0.40564,   -0.13348,    0.46788,   -0.11234,
+ -24.41044,  -16.84285,    0.06626,   -4.11720,   -3.44676,    2.04601,   -0.34149,   -2.65121,   -1.55297,   -0.69065,    0.02506,    1.76441,    1.13287,
+ -18.48285,  -14.11274,   -0.63549,   -3.10682,   -4.32147,    0.71575,    0.29379,   -2.49450,   -2.24068,   -1.35613,    0.77933,    3.25334,    2.35745,
+ -13.87248,  -11.12907,   -1.04583,   -2.56560,   -3.99491,   -0.03370,    0.59145,   -1.87177,   -2.26884,   -1.12270,    1.10905,    3.23326,    2.53021,
+  -9.08447,   -7.92635,   -2.02791,   -3.08573,   -3.92366,   -0.81684,    0.46735,   -1.19261,   -1.88418,   -0.75675,    1.22146,    2.43087,    1.79254,
+  -5.33600,   -5.21240,   -2.22113,   -2.49326,   -2.86820,   -1.25103,   -0.12794,   -0.65089,   -1.01587,   -0.32481,    0.93760,    1.66968,    1.44865,
+  -2.59525,   -2.72141,   -1.47559,   -1.51706,   -1.51809,   -0.57274,    0.12176,   -0.03969,   -0.09426,    0.37483,    0.90498,    1.06274,    0.89507,
+  -1.42737,   -1.89428,   -1.66380,   -1.46703,   -1.23667,   -0.87896,   -0.52353,   -0.27670,   -0.05895,    0.25267,    0.55589,    0.70269,    0.72631,
+  -1.01009,   -1.38642,   -1.31704,   -1.17398,   -1.00001,   -0.79800,   -0.63541,   -0.42318,   -0.19702,    0.04387,    0.24362,    0.43999,    0.57047,
+  -0.77895,   -1.05948,   -1.03922,   -0.96229,   -0.87679,   -0.73403,   -0.63929,   -0.47465,   -0.32427,   -0.15422,   -0.02524,    0.14293,    0.24011,
+  -0.82881,   -1.11742,   -1.10684,   -1.03235,   -0.96362,   -0.81952,   -0.74525,   -0.57821,   -0.43861,   -0.26528,   -0.13865,    0.05660,    0.16186,
+  -0.35972,   -0.47061,   -0.46137,   -0.39041,   -0.35838,   -0.26273,   -0.20377,   -0.09339,   -0.03483,    0.06995,    0.12835,    0.22643,    0.25931,
+  -0.04755,   -0.05598,   -0.03302,   -0.01273,    0.01151,    0.05671,    0.11911,    0.17134,    0.19456,    0.19818,    0.19911,    0.19458,    0.16996,
+  -0.07938,   -0.10116,   -0.08523,   -0.08693,   -0.09317,   -0.07109,   -0.01829,    0.03068,    0.05290,    0.06624,    0.09580,    0.13227,    0.14747,
+   0.06926,    0.10713,    0.11903,    0.11316,    0.10068,    0.11070,    0.14570,    0.17547,    0.17976,    0.17390,    0.17916,    0.18638,    0.17058,
+   0.26443,    0.37722,    0.38035,    0.37451,    0.36403,    0.36042,    0.36336,    0.35838,    0.33535,    0.29982,    0.26079,    0.21541,    0.15515,
+   0.37360,    0.52071,    0.52056,    0.51525,    0.52128,    0.51296,    0.50513,    0.48314,    0.46386,    0.42227,    0.36637,    0.28216,    0.19255,
+   0.13555,    0.19380,    0.21501,    0.24147,    0.28236,    0.31694,    0.35449,    0.38128,    0.40802,    0.41555,    0.40874,    0.37341,    0.32244,
+  -0.29448,   -0.40627,   -0.36830,   -0.31665,   -0.24786,   -0.17582,   -0.09476,   -0.01552,    0.06813,    0.14400,    0.21438,    0.26481,    0.29733,
+  -0.60144,   -0.83460,   -0.78682,   -0.72154,   -0.64098,   -0.55201,   -0.45194,   -0.34815,   -0.23793,   -0.12528,   -0.00904,    0.09761,    0.18751,
+  -0.69167,   -0.95489,   -0.89304,   -0.80826,   -0.71013,   -0.60057,   -0.48226,   -0.36119,   -0.23999,   -0.11567,    0.01128,    0.12929,    0.22304,
+  -0.41023,   -0.56167,   -0.51855,   -0.45896,   -0.39059,   -0.30542,   -0.21282,   -0.12335,   -0.05091,    0.01319,    0.07226,    0.12658,    0.16316,
+  -0.17193,   -0.23419,   -0.21758,   -0.18663,   -0.15410,   -0.11460,   -0.08021,   -0.04418,   -0.01581,    0.01346,    0.03622,    0.06058,    0.07869,
+  -0.10852,   -0.17638,   -0.18566,   -0.16418,   -0.10060,   -0.07273,   -0.06668,   -0.05676,    0.00252,    0.05859,    0.08061,    0.07701,    0.10191,
+   0.84595,   -0.51807,   -1.55112,    0.47720,    0.76134,   -1.04124,   -0.55151,    0.84568,    0.04465,   -0.68262,    0.27772,    0.55861,   -0.21271,
+  10.74878,   -3.11310,   -5.34238,    2.26307,    3.04804,   -1.68683,   -1.57190,    2.02328,   -0.35717,    0.00393,   -0.15863,    1.01564,   -1.07620,
+  22.70843,   -6.61746,   -7.86647,    3.81820,    4.88015,   -2.06069,   -2.43454,    2.49714,   -0.36634,    0.39582,   -0.85246,    1.67405,   -2.18834,
+  32.18055,  -10.68944,   -8.76175,    5.36425,    5.02639,   -2.41739,   -2.50591,    2.78143,   -0.67211,    0.73401,   -1.31273,    2.36673,   -2.74303,
+  39.08526,  -14.58456,   -8.16102,    6.57589,    4.26688,   -2.79942,   -2.70583,    2.63534,   -1.05410,    1.76937,   -1.43361,    1.55572,   -3.38997,
+  42.88630,  -17.70767,   -7.25725,    6.54728,    2.44002,   -3.06865,   -3.13514,    1.98477,   -1.18996,    2.21320,   -1.35169,    1.13674,   -2.86322,
+  43.75336,  -17.89537,   -5.42091,    6.19822,    0.36815,   -3.06211,   -3.01817,    1.11116,   -1.11147,    2.06109,   -1.39334,    0.24111,   -2.88191,
+  39.96351,  -15.94618,   -2.38062,    5.83421,   -1.14522,   -1.73270,   -1.48879,    0.83592,   -1.51866,    1.71870,   -1.36848,   -0.22345,   -2.14575,
+  31.45315,  -11.09490,    1.79407,    4.30672,   -2.58627,   -0.17169,    0.32247,   -0.08480,   -2.12540,    1.29164,   -0.93414,   -0.57769,   -0.99019,
+  18.59060,   -3.90034,    4.51498,    2.55389,   -3.25637,   -0.47962,    0.40670,   -0.29496,   -1.78026,   -0.62591,   -0.17699,   -0.85637,    0.57991,
+  10.99932,    5.01000,    4.48573,    2.56734,   -0.08985,    0.69420,    1.14445,    0.44501,   -1.14982,   -1.17057,   -0.50301,   -0.86144,    1.36458,
+   7.27482,   18.54009,    5.61949,    3.31578,    3.53790,    1.19695,    1.36667,   -0.11593,   -0.76783,   -2.51154,   -1.72880,   -1.06676,    0.78223,
+   7.49068,   30.12584,    4.73965,    4.73910,    5.80762,   -0.88405,    3.03583,   -0.41851,   -0.74616,   -3.91174,   -3.07679,   -0.19967,    0.40673,
+   7.94285,   35.63343,    4.15385,    5.54752,    7.28455,   -2.47315,    4.79059,   -0.36822,   -0.76962,   -3.83732,   -4.03237,    0.22894,   -0.91120,
+   8.58590,   36.59988,    2.41544,    5.77743,    6.68436,   -3.89020,    4.89490,   -1.00768,   -0.56777,   -3.55960,   -3.76792,    0.58118,   -1.70878,
+   8.57344,   34.03123,    0.61296,    5.92270,    5.05179,   -5.00299,    4.20648,   -2.23552,   -0.11940,   -3.99699,   -3.23494,    0.89732,   -2.60078,
+   5.78229,   27.64637,   -0.36020,    6.15170,    3.17174,   -4.57662,    4.25798,   -2.29137,    0.27880,   -4.44962,   -1.97717,    0.88484,   -3.60586,
+   0.23718,   18.80261,    1.23752,    6.06667,    1.15746,   -3.21039,    3.56837,   -2.37208,    0.35882,   -4.56392,    0.27678,    1.79146,   -4.22465,
+  -9.93021,    7.58356,    3.76043,    4.10620,   -1.53005,   -2.03193,    1.05332,   -1.63585,    1.82193,   -4.44761,    2.51193,    2.43774,   -3.15035,
+ -24.19330,   -1.48371,    6.88449,    1.15200,   -1.69619,    2.34576,    0.24782,    0.73166,    3.00216,   -3.47517,    4.28729,    1.86801,   -1.40110,
+ -37.52070,   -4.30909,   11.90375,   -1.40707,   -1.02627,    7.12782,   -1.35290,    0.68745,    3.21353,   -1.50163,    4.93153,   -0.11838,    0.72051,
+ -43.55009,   -5.04015,   13.40414,   -3.67952,    0.01548,    8.99950,   -1.79994,    0.40766,    3.33770,    0.53111,    4.24657,   -2.02550,    3.04733,
+ -42.51932,   -4.89435,   10.95545,   -9.29525,   -0.69523,   10.09831,   -0.79636,    0.52587,    2.75109,    2.84201,    3.42491,   -2.54333,    4.17730,
+ -23.87650,   -3.56064,    3.61606,  -15.48069,   -1.35062,    7.06025,   -0.41084,   -0.89379,    0.17333,    5.28044,    2.53100,   -3.10096,    4.57176,
+  -1.95067,   -1.62599,   -4.36667,  -18.85973,   -2.11852,    3.97257,    0.07670,   -2.74860,   -2.55112,    6.29552,    1.41799,   -2.96217,    4.84084,
+  19.53974,    0.76802,  -11.17333,  -20.72311,   -3.47356,    0.60982,    2.05922,   -3.62263,   -4.16425,    5.85003,    1.12973,   -2.65552,    3.58460,
+  60.75739,   -5.08121,  -12.47585,  -16.81619,   -5.45903,   -1.35013,    0.77835,   -3.80548,   -3.64245,    4.57869,    0.85220,   -1.76367,    2.72508,
+  87.85258,   -9.92849,  -11.27633,  -12.00721,   -6.97649,   -3.14012,   -0.52290,   -3.06606,   -1.86302,    2.78719,    0.55353,   -0.15964,    1.95145,
+  99.86404,  -12.99408,   -8.63910,   -7.49278,   -7.42962,   -3.48943,   -1.11168,   -0.82875,   -0.63118,    0.76545,    0.59896,    0.65211,    1.27843,
+  98.39548,  -15.14276,   -3.97054,   -2.54129,   -6.74303,   -2.47134,   -2.03093,    0.20558,    0.41677,   -0.20560,    0.43909,    0.78924,    1.28537
+};
+
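+/* Golden second-difference (delta-delta) feature values. This description is an
+ * assumption based on the array name: the table presumably serves as reference
+ * output for the feature pre-processing unit tests, which would compare the
+ * computed second-order differences of the feature matrix above against it. */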
+static const float golden_diff2_features[] = {
+  16.21731,    0.19516,    0.20180,    0.20088,    0.19413,    0.19077,    0.19143,    0.18572,    0.16733,    0.14279,    0.12012,    0.09567,    0.06024,
+  43.27348,   -0.20258,   -0.18401,   -0.16553,   -0.14803,   -0.12548,   -0.09792,   -0.07352,   -0.05640,   -0.03998,   -0.01733,    0.00732,    0.02418,
+  56.11353,   -0.24310,   -0.23309,   -0.22323,   -0.21464,   -0.20435,   -0.19125,   -0.17725,   -0.16231,   -0.14209,   -0.11335,   -0.07929,   -0.04632,
+  44.80687,   -0.33941,   -0.36965,   -0.35402,   -0.30704,   -0.28633,   -0.29821,   -0.28407,   -0.21006,   -0.11928,   -0.06474,   -0.02990,    0.03805,
+  -0.10879,   -0.20161,   -0.26546,   -0.23390,   -0.14047,   -0.10829,   -0.15147,   -0.15231,   -0.05091,    0.05645,    0.06798,    0.03493,    0.07529,
+  -0.37346,   -0.52451,   -0.50858,   -0.47412,   -0.42533,   -0.37226,   -0.31718,   -0.25402,   -0.18147,   -0.10781,   -0.03950,    0.02753,    0.09911,
+  -0.19116,   -0.24038,   -0.18975,   -0.17430,   -0.17958,   -0.14767,   -0.07469,   -0.02263,   -0.02556,   -0.03835,   -0.00892,    0.04125,    0.05542,
+   0.11563,    0.19720,    0.24364,    0.22611,    0.16819,    0.15623,    0.20157,    0.22173,    0.17058,    0.11040,    0.11162,    0.14075,    0.11520,
+   0.18031,    0.29460,    0.34897,    0.32642,    0.25085,    0.21896,    0.24202,    0.22669,    0.12594,    0.01853,   -0.01105,   -0.00135,   -0.04710,
+   0.20499,    0.31708,    0.34884,    0.31497,    0.23562,    0.18721,    0.17936,    0.14123,    0.03911,   -0.06382,   -0.10252,   -0.10855,   -0.15534,
+   0.31046,    0.43420,    0.41051,    0.34902,    0.26344,    0.17682,    0.09930,    0.01492,   -0.07655,   -0.15488,   -0.20076,   -0.22568,   -0.24929,
+   0.12999,    0.14073,    0.06069,    0.01583,    0.00081,   -0.04401,   -0.11966,   -0.15633,   -0.11731,   -0.06064,   -0.05386,   -0.07838,   -0.06843,
+   0.26017,    0.17106,   -0.11744,   -0.07830,    0.15252,    0.12827,   -0.16917,   -0.29638,   -0.06758,    0.15656,    0.05404,   -0.15683,   -0.09470,
+   0.31485,   -0.11509,   -0.84687,   -0.54336,    0.20418,    0.10039,   -0.64470,   -0.76768,   -0.10500,    0.26783,   -0.10735,   -0.43370,   -0.08453,
+   0.75957,   -0.05233,   -1.31864,   -0.68615,    0.29225,    0.21304,   -0.31901,   -0.61384,   -0.24770,    0.54825,    0.53502,   -0.20244,   -0.13772,
+   1.88292,    0.41875,   -1.89908,   -0.88685,    0.26989,    0.21361,    0.20804,    0.06673,   -0.03850,    0.52586,    0.46733,   -0.19222,    0.06346,
+   3.29321,    0.50465,   -2.27539,   -0.18780,    0.21818,    0.09957,    0.50984,    0.42155,   -0.02883,   -0.14084,    0.16381,    0.40750,    0.17080,
+   7.79825,    1.50741,   -3.14751,    0.29748,    0.02551,   -0.65686,    0.49119,    0.87794,   -0.00355,   -0.16768,    0.24352,    1.50982,    0.08034,
+  14.00504,    0.91646,   -2.46539,    0.20880,   -0.50957,   -0.61261,   -0.18063,    0.83464,   -0.52641,   -0.13895,    0.09943,    1.74159,   -0.52868,
+  13.44142,    0.31483,    0.29234,    0.15242,   -1.14150,   -0.05969,   -0.42379,    0.83283,   -0.86010,   -0.92560,   -1.04317,    0.47985,   -0.55997,
+   7.22871,    0.02501,    2.49428,    0.95413,   -1.02842,    0.15923,   -1.01835,   -0.11145,    0.05258,   -0.72917,   -1.47458,   -1.18530,   -0.17015,
+  -1.61599,   -0.43003,    3.37022,    0.88134,   -0.13308,    0.03882,   -1.58968,   -0.96594,    0.62509,   -0.06993,   -1.16790,   -2.32724,   -0.06668,
+ -10.76176,   -0.49072,    3.29918,    0.07189,    0.00983,    0.42607,    0.01932,    0.04170,    1.57404,    0.85996,    0.00954,   -1.53939,    0.88909,
+ -17.49955,   -0.84642,    1.83108,   -0.41681,    0.20646,    0.43982,    1.17639,    0.01386,    1.51175,    1.19512,    0.82039,   -0.61095,    0.86442,
+ -18.20613,   -0.14520,   -0.01912,   -0.39252,    1.10353,    0.63736,    1.88209,    0.21828,    1.04752,    0.99000,    0.92230,    0.93419,    0.61598,
+ -12.11390,    1.46204,   -1.21052,   -1.04637,    1.22112,    0.17140,    2.24943,    0.65173,   -0.08588,    0.92601,    1.20127,    2.28213,    0.75185,
+  -3.51505,    2.54119,    0.00642,   -2.18675,    0.11835,    0.00227,    1.10610,    0.51876,   -0.93515,   -0.16250,    0.13794,    1.11413,    0.14248,
+   0.45022,    3.88328,    1.77821,   -2.05071,   -0.69773,    0.26972,    0.12809,   -0.71774,   -2.04822,   -2.04363,   -0.77679,   -0.64332,   -0.11925,
+   1.84052,    2.91784,    1.42999,   -0.91996,   -0.57730,   -0.33576,   -1.01007,   -1.70689,   -1.77488,   -1.43049,    0.02612,   -1.89165,   -0.00646,
+  -0.12880,   -0.53214,    1.48872,   -0.68723,   -0.84349,   -0.47665,   -0.65907,   -0.98961,   -0.54255,   -0.72874,    0.44080,   -2.24455,   -0.94040,
+  -4.97158,   -5.22039,    3.00956,    1.78392,   -0.31825,    0.91453,    0.94948,    0.53822,    0.87715,   -0.75816,    0.10799,   -0.14966,    0.57097,
+  -6.88770,   -7.83044,    3.55969,    4.53842,    0.33471,    0.39304,    0.72327,    1.03753,    1.80131,    0.30180,    0.80916,    1.57761,    0.06766,
+  -0.06472,   -2.73202,    2.67699,    3.19374,   -0.29062,   -0.56358,   -0.11369,    0.91659,    1.20609,    0.50170,    0.16801,    1.54581,   -1.30414,
+   7.96513,    2.19639,    1.12540,    1.29673,   -0.77663,   -1.71659,   -1.67205,    0.03821,   -0.39451,    0.56292,   -0.26644,    1.20030,   -1.91406,
+  20.36825,    7.16812,   -2.07462,   -2.56250,   -1.72555,   -2.07219,   -2.24737,   -0.62517,   -1.37638,    0.89459,   -0.92776,   -0.20225,   -2.23473,
+  27.52857,    8.49879,   -5.10473,   -4.96347,   -1.45094,   -1.23414,   -1.66458,   -1.39808,   -2.19753,    1.22582,   -0.15785,   -0.48237,   -2.32545,
+  22.42527,    3.69515,   -5.78014,   -4.11555,   -0.79674,   -0.46982,   -0.64408,   -0.16306,   -0.84121,    1.61191,   -0.09852,   -0.66065,   -1.43130,
+  11.24784,   -2.54764,   -4.99240,   -1.41613,   -0.71763,    0.07090,   -0.07168,    1.05152,    1.13015,   -0.00134,   -0.50929,   -0.42378,   -0.52650,
+  -2.27832,   -8.25114,   -3.73306,    3.31291,    1.59268,    0.15774,    0.36296,    1.77359,    2.16789,   -1.40649,   -1.20025,    0.35247,    2.46950,
+ -10.87834,   -8.49211,   -2.13050,    5.42344,    3.29342,   -2.09924,   -0.78673,    1.19661,    2.46813,   -1.00383,    0.29814,   -0.01854,    3.86095,
+ -12.64384,   -4.14936,   -0.63627,    4.12979,    3.06376,   -3.62388,   -1.78564,    0.15274,    0.27741,   -1.96116,    0.51165,   -0.51537,    3.92123,
+ -13.84193,   -2.73674,    1.04745,    4.18937,    3.25537,   -2.80288,   -2.02591,   -1.37560,   -1.85905,   -1.85010,    0.45583,    0.58390,    3.33817,
+ -11.25400,   -1.05981,    3.28878,    2.43213,    2.21390,   -0.98947,    0.08231,   -1.47618,   -2.38273,   -1.07941,   -0.22681,    0.86386,    1.48212,
+ -11.05228,   -0.22290,    5.64809,    1.93243,    1.05831,    1.77555,    1.93646,   -1.12377,   -2.22490,   -0.69184,   -0.95848,    0.70705,   -1.07224,
+ -12.78424,   -0.67527,    7.24214,    3.37218,   -0.79577,    4.67174,    3.65980,   -0.47327,   -1.43202,   -0.44532,   -1.34798,    0.34553,   -2.89207,
+ -11.10884,    0.57496,    5.36448,    2.60238,   -3.22833,    5.30668,    3.88445,   -0.72640,   -0.06072,   -0.23699,   -0.17798,    0.66642,   -3.76137,
+ -14.76188,    4.10834,    2.80884,   -0.11126,   -2.92619,    4.78836,    2.72440,   -0.89062,    1.61909,    1.01685,    0.55895,   -0.35090,   -1.62364,
+ -12.08655,    3.87674,    1.44833,   -2.20587,   -1.70860,    4.93422,   -0.33181,   -0.40690,    2.70387,    1.26864,    1.62162,   -1.39498,    1.41874,
+  -4.35015,    0.69686,   -1.40145,   -4.04673,   -1.39059,    3.77357,   -2.48010,    0.48856,    2.38896,    0.75792,    1.41704,   -0.90706,    2.51950,
+  -2.98126,   -0.57544,   -2.04650,   -5.84479,    0.28809,    0.99410,   -2.62742,   -0.09681,    1.11973,    0.93487,    0.81901,   -0.48789,    1.68075,
+   0.48137,   -1.78558,   -2.41798,   -6.14209,    2.28130,   -1.36703,   -0.85182,    1.26430,    1.03422,    1.56647,    0.16530,   -0.44342,    0.43913,
+   5.29263,   -2.91849,   -4.07649,   -5.20471,    2.50273,   -2.33617,    1.43612,    2.41752,    0.44709,    1.65795,   -0.35830,   -0.56680,   -0.91788,
+   8.78468,   -3.08363,   -4.82285,   -2.76874,    1.36147,   -2.90089,    3.06442,    2.46061,    0.10628,    1.45005,   -0.11077,    0.12710,   -1.69580,
+   9.94646,   -0.88849,   -3.99298,   -0.79378,   -0.24021,   -3.27466,    2.42517,    1.15397,    0.04365,    0.87672,    0.51490,    1.54578,   -1.26088,
+   6.50878,    1.91146,   -2.31437,    0.57698,   -1.07603,   -2.39338,    0.04244,   -0.10844,   -0.21124,    0.17367,    0.26625,    0.86430,    0.04497,
+   7.70786,   -0.67801,   -1.46316,    1.88686,   -2.15238,   -0.56492,   -2.20550,   -0.78685,   -0.63474,   -0.97331,   -0.04675,    0.38546,    0.91254,
+  24.54930,   -2.71407,   -2.79878,    1.09356,   -4.45266,   -1.62989,   -2.90559,   -2.40690,   -1.53768,   -1.00329,   -0.35777,    1.35590,    0.68131,
+  32.06574,   -1.15240,    0.55634,   -0.18682,   -3.40026,   -2.27297,   -1.81379,   -2.88574,   -0.99391,   -0.68378,   -0.21541,    1.12434,    0.46684,
+  28.45597,   -3.15990,    2.83928,    0.65308,   -1.67239,   -0.68778,   -0.77180,   -1.18102,    0.39176,   -0.48112,    0.06073,    1.04493,    0.33685,
+  14.59797,   -5.13138,    3.18517,    2.50967,    1.11177,    0.57163,   -0.42486,    0.98442,    1.48556,   -0.83740,   -0.23931,    0.73557,    0.17675,
+  -3.72845,   -3.51124,    1.55782,    3.41259,    3.04399,    0.40921,   -0.68759,    1.95865,    1.00030,   -1.01141,   -0.23681,   -0.06809,    0.44259,
+ -21.61495,    2.89444,   -0.54097,    3.40615,    3.46339,   -0.14803,   -1.11945,    1.84813,   -0.62411,   -0.52742,    0.33278,   -0.95693,    0.37680,
+ -33.78760,    9.00981,   -2.74898,    1.85490,    3.27310,    0.25686,   -0.82873,    1.25507,   -1.14359,    0.12123,    0.76512,   -1.72360,   -0.51226,
+ -31.51916,   14.52491,   -1.82974,   -0.18268,    2.48123,    0.26023,    0.34134,    0.86735,   -2.05563,    0.18035,   -0.15100,   -2.20650,   -1.42871,
+  -8.13280,   16.60238,   -0.28863,   -2.56735,    0.05195,   -0.53726,    0.79647,   -0.75513,   -2.34694,    0.74106,   -1.55933,   -2.39823,   -1.98695,
+   5.05126,   13.15464,    3.60279,   -4.34883,   -0.80506,   -0.09033,    0.36513,   -1.74143,   -0.82592,    0.41468,   -2.75943,   -2.80814,   -2.28767,
+   7.32411,    4.78275,    5.29998,   -4.34229,   -1.44093,    0.81103,    0.02437,   -2.32194,    0.24427,    0.33826,   -1.67193,   -0.96206,   -1.24182,
+   9.10812,   -4.59359,    1.96367,   -2.91030,   -0.87951,    1.50810,   -0.30948,   -2.06597,    1.81809,    0.25831,    0.14130,    0.95121,   -0.05634,
+   6.96846,  -11.21001,   -1.88491,   -2.25561,   -2.15953,    0.73442,   -0.77903,   -2.01683,    1.21401,    0.31835,    1.63315,    1.29029,    0.85059,
+   1.13596,  -11.05882,   -3.52247,   -0.25600,   -3.23051,    0.99667,   -0.03908,   -1.50529,    0.06820,    0.86959,    2.75778,    1.28492,    1.31335,
+  -3.70472,   -7.35983,   -2.95527,    1.77781,   -1.96865,    1.57230,    1.82800,   -1.06835,   -0.53964,    0.59958,    2.55650,    1.57267,    0.93750,
+  -5.04297,   -1.50997,    0.15249,    2.92941,   -1.16833,    0.70852,    3.91727,    0.42658,   -1.21815,   -0.11621,    0.80285,    1.50611,    0.53641,
+  -4.06056,    2.18459,    1.73452,    2.38119,   -0.74181,   -0.17670,    3.29103,    0.68917,   -1.78614,   -0.15311,   -1.04603,    0.92786,    0.06090,
+  -2.86080,    2.13510,    2.38423,    0.81881,   -0.49837,   -0.42827,    1.61950,    0.76231,   -0.98939,   -1.00621,   -2.06018,    1.11499,   -0.19129,
+  -1.93483,    0.82137,    2.78030,    0.64130,    0.56392,    0.21727,    0.76120,    0.71704,   -0.30525,   -1.11435,   -0.98799,    1.28927,    0.11555,
+   0.68825,   -2.47889,    1.71057,    1.32855,    2.12561,    0.48081,   -1.00540,    1.64850,    0.79676,   -0.66252,   -0.64123,   -0.02105,   -0.11619,
+   0.48389,   -4.33922,    0.98819,    1.66829,    2.46791,   -0.48849,   -2.91387,    2.05927,    0.34919,   -0.05767,   -0.74559,   -1.72925,   -0.26099,
+   0.24245,   -2.77404,    0.23601,    2.20284,    3.88731,   -1.43572,   -2.23418,    2.44810,    0.90633,    0.54946,   -0.57527,   -3.42200,   -0.07661,
+   0.17565,   -1.35679,   -0.84536,    2.18870,    4.77929,   -2.31978,   -1.05009,    2.80705,    1.24735,    0.16229,   -0.10471,   -3.42125,    0.31608,
+  -1.18938,   -0.77795,   -1.36764,    1.86921,    3.31434,   -2.76629,   -0.22201,    3.36896,    1.53384,   -0.94211,    0.32354,   -2.05125,    1.23954,
+  -3.13374,   -0.91509,   -0.97772,    1.65283,    1.83681,   -2.39579,   -0.00488,    2.36957,    0.36397,   -1.91926,    0.74306,   -0.34365,    0.65549,
+  -4.97267,   -0.96364,    0.13713,    0.91143,   -1.31509,   -1.00163,    1.03233,    0.69180,   -0.72927,   -2.78508,    1.32490,    1.56583,    0.35754,
+  -6.85945,   -1.28193,    2.03452,    1.50264,   -2.92260,    0.98349,    1.41435,   -1.51727,   -1.21326,   -1.86687,    1.87517,    2.28697,    0.32175,
+  -7.11939,   -1.01505,    3.67222,    1.59803,   -3.20086,    2.97749,    1.06817,   -3.10749,   -0.99417,   -0.26336,    0.79722,    2.42876,    0.23495,
+  -5.31246,    0.28673,    3.27009,   -0.19856,   -2.88602,    4.07824,   -0.00856,   -4.00545,   -0.20722,    1.62717,    0.14996,    2.04044,   -0.79287,
+  -1.39747,    1.09174,    1.50430,   -2.01806,   -0.80691,    3.93254,    0.23631,   -3.97883,    1.33579,    2.66006,   -0.62404,    0.95474,   -0.59348,
+   3.48982,    0.89344,   -1.34445,   -3.53587,    0.67709,    2.90906,    0.09460,   -1.85039,    1.73261,    2.93600,   -0.79489,   -0.18817,    0.14495,
+   8.41412,    1.19195,   -4.37971,   -4.15560,    1.22093,    0.56913,   -0.53951,    0.53321,    1.87599,    2.13698,   -1.20868,   -1.36792,   -0.11842,
+  11.79348,    2.07406,   -6.59916,   -3.18767,    0.80053,   -3.53592,   -1.01774,    2.37130,    0.55460,    0.43783,   -1.15931,   -1.65250,   -1.28935,
+  12.09620,    3.57289,   -6.70376,   -1.96437,   -0.15075,   -5.44186,   -1.12845,    3.54374,   -1.06725,   -1.74863,   -0.28421,   -0.71038,   -0.87711,
+   9.04450,    4.26321,   -5.62701,   -1.01178,    0.58143,   -4.83820,   -1.86991,    2.57650,   -1.34064,   -2.74849,   -0.25834,   -0.20044,   -0.32551,
+   4.86684,    5.19290,   -2.60689,   -1.42399,    0.79326,   -2.28328,   -2.04050,    1.21844,   -0.85736,   -1.27215,   -0.81190,    1.08856,    0.99834,
+   0.01623,    4.37000,    0.22282,   -2.03165,    0.58962,    1.50096,   -1.27020,   -1.11682,   -0.50553,    1.81982,   -0.18143,    1.41350,    1.26103,
+  -4.79588,    1.68520,    1.65410,   -2.66616,   -0.25647,    4.96139,    1.14134,   -3.99812,   -0.57983,    3.27449,    0.15974,    0.03994,    0.64050,
+  -6.52693,   -0.79567,    3.55107,   -2.63971,   -0.98564,    5.81594,    3.22678,   -3.99359,   -0.95776,    3.42637,    1.42837,   -0.96330,   -0.26422,
+  -7.08716,   -2.52728,    5.10914,   -1.21147,   -2.29996,    4.11644,    3.75145,   -2.84352,   -1.31686,    1.28807,    1.17904,   -0.91478,   -0.46273,
+  -5.96604,   -2.75027,    6.02725,    0.86845,   -3.49258,   -0.38843,    3.00652,   -0.18185,   -1.16662,   -1.59567,    0.34567,   -0.04521,    0.16324,
+  -4.59670,   -1.91198,    5.15809,    2.62574,   -3.25644,   -2.83582,    1.15669,    2.52683,    0.44425,   -3.77954,   -0.16283,    0.55103,    1.26083,
+  -3.34087,   -0.99035,    2.62799,    3.01482,   -2.23738,   -3.71878,   -0.89664,    3.38273,    2.46827,   -3.69741,   -0.99399,    0.64024,    1.23144,
+  -1.09569,    0.10035,    0.70302,    2.90472,   -0.58142,   -3.26729,   -2.55281,    3.23568,    3.81462,   -1.09265,   -0.57576,    0.36919,    0.72021,
+  -0.69000,   -0.22143,   -0.89592,    3.11012,    0.69255,   -1.41779,   -3.33030,    1.90778,    3.97646,    1.20440,   -0.33052,   -0.58568,   -1.02687,
+  -0.46983,   -0.75360,   -2.78557,    2.20918,    2.17967,    0.89865,   -3.16726,   -0.33005,    1.89780,    1.72530,    0.15660,   -0.91208,   -2.67401,
+   0.86113,   -1.10262,   -3.73482,    1.04196,    3.74525,    1.45714,   -1.97777,   -1.12089,   -0.05726,    1.75960,    0.89571,    0.04996,   -2.26333,
+   1.54331,   -1.65894,   -5.35786,    0.17701,    3.73783,    2.10178,   -0.60824,   -2.08271,   -2.10277,    0.68544,    0.79497,    1.22325,    0.20415,
+   2.75545,   -2.01349,   -5.84217,   -1.25973,    2.98491,    2.32762,    1.40892,   -0.78433,   -1.92256,    0.31187,    0.00115,    0.88398,    1.76278,
+   2.43094,   -1.80217,   -4.50456,   -1.03065,    1.38983,    2.07960,    2.15277,   -0.15730,   -1.19875,    0.30760,    0.11612,    0.60493,    2.35047,
+   0.84679,   -2.47226,   -2.41982,   -0.74440,    0.38131,    1.50645,    1.98511,   -0.89829,   -1.23095,    0.38567,    1.11637,    1.32482,    1.85701,
+  -1.04035,   -2.75775,    0.72750,   -0.18247,   -0.43029,    0.55353,    1.11439,   -1.03401,   -1.09463,    0.19856,    2.27336,    0.67122,   -0.84261,
+  -3.09812,   -3.04583,    3.51253,    0.81631,   -1.46276,   -2.09959,   -2.16450,   -1.61351,   -0.33714,    0.53306,    1.14867,   -1.37724,   -2.10640,
+  -3.04177,   -1.70739,    5.31536,    2.60671,   -0.76392,   -3.78547,   -3.50987,   -0.72561,    0.30075,    0.00962,   -0.43289,   -1.77595,   -1.42549,
+  -1.36122,   -0.29429,    5.64058,    3.42238,   -0.88336,   -5.49962,   -3.17181,    0.28307,    0.15652,   -1.71665,   -1.86693,   -1.55986,   -0.67926,
+  -0.05605,    0.50171,    3.52788,    2.23423,   -1.71910,   -4.30363,   -1.43891,    0.71553,   -0.85863,   -2.38490,   -1.61935,   -0.42899,    0.09333,
+   1.95913,    1.36761,    1.86388,    0.36330,   -1.66910,   -1.97065,    1.11231,    2.01403,   -0.54413,   -1.66595,   -1.34725,    0.19886,    0.48335,
+   4.00524,    2.36195,    0.29345,   -1.20186,   -1.95526,   -0.41043,    2.61748,    1.72425,   -0.64939,   -0.40865,    0.01834,    0.99658,    0.33514,
+   5.74574,    2.67030,   -1.71518,   -2.68293,   -1.74787,    0.84111,    2.96371,    0.60119,   -0.91063,    0.09193,    1.28830,    1.29943,    0.38961,
+   9.16844,    3.65109,   -1.55293,   -2.48842,   -0.65675,    2.25802,    2.97735,   -0.45119,   -0.26118,    0.77344,    1.19886,   -0.06256,   -1.31736,
+  14.03404,    5.33166,   -2.18882,   -1.59919,    1.41859,    2.15699,    0.85363,   -1.24766,    0.62711,    1.76408,    0.17662,   -1.57600,   -1.28843,
+  15.93616,    4.98503,   -1.13437,    0.10328,    2.26967,    1.94326,    0.53132,   -0.14357,    1.72593,    0.85668,   -0.54843,   -0.39603,   -0.41069,
+  12.32546,    2.02964,   -0.79794,    1.01409,    1.32153,    0.95630,   -0.38356,   -0.11529,    0.04899,   -0.15901,   -1.38836,    0.45448,    0.89047,
+   3.22332,   -2.96326,   -0.99718,    0.87723,    0.40621,    0.65367,   -0.96395,   -0.43524,   -0.63281,    0.29420,   -0.42521,    0.59194,    1.82136,
+  -4.69443,   -5.77902,   -0.61340,    0.47730,   -0.38476,    0.82694,   -1.10198,    0.46039,   -0.13309,   -0.35774,   -0.16382,    0.47541,    1.05309,
+ -10.69698,   -5.97465,   -0.89254,   -0.53134,   -0.95255,   -0.01493,   -1.16917,    1.09365,    0.38539,   -0.46845,    0.28823,    0.77107,    0.26669,
+ -14.42573,   -2.34556,    0.21167,   -0.54150,   -1.33387,   -0.92884,   -1.04867,    1.61572,    1.16332,   -0.65551,   -0.20347,    1.25553,    0.04953,
+ -17.57152,    2.88091,    2.18779,   -0.04547,   -0.37261,    0.80963,    0.38018,    1.57617,    3.28357,    0.13170,   -0.44664,    0.37655,   -0.28389,
+ -14.15765,    5.55117,   -0.39026,    0.29892,    1.96548,    4.02907,    1.08338,   -0.94065,    2.46001,   -0.13199,   -1.42123,    0.63745,    0.40805,
+  -1.09724,    2.01740,   -1.83111,   -0.83400,    0.98159,    3.77382,    1.09521,   -1.62366,    1.60375,    0.05070,   -1.13992,    1.16497,    1.08691,
+   6.62698,   -1.02055,   -0.89974,   -0.98594,   -0.12853,    0.60213,   -0.30740,   -1.44273,   -0.70436,    0.90354,   -0.16535,   -0.49715,    1.39288,
+   8.37336,   -2.40382,   -0.95125,   -1.39960,   -0.29384,   -2.34899,   -0.69393,   -0.84604,   -3.07693,    0.86963,    1.33303,   -1.90806,    0.30474,
+   9.09226,   -2.35774,   -0.59003,   -2.48865,   -0.22981,   -3.82534,   -0.72252,    0.12832,   -4.10311,    0.43712,    1.39041,   -2.10518,   -1.41038,
+   7.50743,   -0.70278,   -0.37272,   -3.56151,    0.48566,   -4.81038,   -1.70290,    0.77569,   -3.84390,    0.77642,    1.18686,   -1.44140,   -1.27691,
+   2.50409,    1.52394,    0.74946,   -2.17047,   -0.06193,   -3.51532,   -1.92592,    1.04794,   -1.62890,    0.11128,    0.43504,   -0.96312,   -0.99034,
+  -5.22338,    1.67406,    1.82973,   -0.32132,   -0.63252,    0.02896,   -1.02435,   -0.26692,    1.60749,   -0.53685,   -0.22094,   -1.20691,    0.36702,
+  -4.58686,   -0.14313,    0.00440,    0.93010,   -0.13779,    3.95035,    0.10185,   -2.79772,    2.07685,   -1.03115,   -1.39511,    0.04919,    1.56523,
+   0.39207,   -1.65948,    0.05167,    0.69743,   -0.84583,    4.02613,    1.06878,   -1.80613,    3.03130,    0.09818,   -0.74767,    0.97279,    1.37240,
+  -3.67383,   -0.08797,    1.94511,    1.78755,   -0.49418,    2.29842,    2.07127,   -0.99623,    2.53297,    0.82346,    0.10110,    0.16294,    0.38437,
+  -7.04397,   -0.79178,    2.98282,    2.64806,   -0.53676,    1.59799,    2.67209,   -0.37433,    1.24542,    0.88541,    0.39583,   -0.27205,   -0.80536,
+  -4.75853,   -5.44884,    5.40727,    2.14419,   -0.66403,    1.43901,    1.27792,    0.74164,    0.40086,    0.59947,    0.65964,    0.09008,   -1.85426,
+  -0.80910,   -8.15322,    7.01508,    1.74411,    0.55960,    2.30439,    0.38024,    1.74539,   -0.04146,    0.30871,    0.78478,    0.82254,   -1.78181,
+   1.55233,   -8.47263,    6.00203,    2.18416,    0.95761,    2.26771,   -0.28012,    1.41688,   -0.58115,   -0.77729,    0.76535,    1.19218,   -0.95701,
+   1.29372,   -6.79743,    1.55503,    0.30901,    0.57075,    0.15209,    0.14880,    2.44755,   -0.03292,   -1.43924,    0.03932,    0.16937,   -0.24720,
+   1.80636,   -1.49374,   -3.37284,   -0.99571,    0.91656,   -1.79310,    0.63861,    2.60025,    0.04679,   -1.08674,   -0.35133,    0.62593,    0.91322,
+   0.68870,    3.68574,   -7.25332,   -2.06033,    1.34142,   -2.86425,    0.92081,    1.84401,    0.23503,    0.11824,   -0.64202,    0.95494,    1.27660,
+  -1.90220,    7.11677,   -7.94954,   -1.50408,    0.71101,   -3.37488,    0.84299,    0.23825,    0.36986,    0.13650,   -0.21722,    0.56112,    0.53745,
+  -4.09752,    7.19482,   -5.89078,   -0.06701,   -1.19444,   -3.30172,   -0.08174,   -1.53648,   -0.54018,   -0.41243,   -0.20274,   -0.70925,   -0.84675,
+  -5.05081,    6.15473,   -2.14298,    1.96321,   -0.77681,   -0.88775,   -0.60982,   -2.05754,   -1.57310,    0.20186,    0.95024,   -0.09381,   -1.71482,
+  -5.45865,    9.16485,   -0.63835,    2.63683,    3.46940,    2.50331,   -0.89519,   -2.86652,   -2.68339,   -0.42777,    0.46653,   -0.39933,   -0.94980,
+  -7.15794,   14.07588,   -1.48734,    1.17906,    4.65874,    1.20143,   -2.10305,   -2.96969,   -3.33467,   -1.49633,    0.35106,   -0.71336,   -0.29812,
+  -8.20444,   14.70248,   -1.63056,   -1.80849,    1.70856,   -1.71557,   -2.02139,   -0.32258,   -3.55417,   -1.30519,    0.26541,   -0.67806,    0.09364,
+  -4.39986,   10.60242,    0.44664,   -1.66287,    0.12941,   -3.63376,   -1.21170,    0.98407,   -2.61885,    0.09384,    0.55667,    0.57674,    1.04438,
+   0.64806,    2.04062,    1.62402,   -2.34527,   -1.60279,   -3.60193,    0.11853,    1.67965,   -0.51766,    1.37388,    0.41398,    0.75521,    1.42050,
+   5.01791,   -6.15281,    2.48339,   -1.08979,   -3.23749,   -2.58196,    1.27918,    1.82403,    1.16025,    0.95175,   -0.34997,    0.16514,    0.96616,
+   8.83378,  -11.30906,    2.31181,    0.38729,   -3.68626,   -0.78698,    1.80967,    1.37946,    1.47948,    1.08950,   -0.95211,   -0.45951,    0.28065,
+  10.05825,  -12.78813,    2.02912,    1.80898,   -1.54103,    2.77833,    1.83379,    0.91330,    2.08963,    1.27430,   -0.99111,   -0.52263,   -0.20687,
+   9.49942,   -8.71228,    0.17933,    0.86607,    1.12210,    4.19081,   -0.04120,   -0.08533,    2.07753,    0.13819,   -1.49305,   -1.03974,   -0.17264,
+   4.96108,   -2.72504,   -0.90154,   -0.90986,    0.13664,    1.89568,   -1.55447,   -1.30247,    1.80273,   -0.85200,   -0.81179,   -0.69120,   -0.38906,
+   0.08487,   -0.80578,    0.30573,   -1.63767,   -1.27700,    0.27901,   -1.99012,   -1.42654,    0.81261,   -0.51483,    0.30977,    0.62867,   -0.01883,
+   0.24185,   -2.81577,    0.71859,    0.22825,   -2.04332,    0.98005,   -0.73467,   -1.35773,    2.14691,   -0.81958,    0.42843,    1.63667,    0.12070,
+   2.54637,   -2.06032,   -0.37911,   -0.09031,   -2.84493,    1.05361,    0.05539,   -1.33701,    2.37299,   -0.80721,    0.54231,    1.50378,    0.22601,
+  10.74993,    0.46409,   -3.19143,   -1.84183,   -3.66671,    1.15522,    0.10569,   -0.68615,    1.52641,   -0.12650,    0.21268,    0.26509,   -0.17750,
+  13.92211,    1.72096,   -5.49705,   -3.79817,   -2.20497,    0.10411,    0.77840,   -0.50270,    0.24503,    1.44755,    0.86626,   -0.10963,   -0.99623,
+   9.93487,    0.92591,   -5.95206,   -3.68119,   -0.19081,   -1.22371,    2.17165,   -0.46051,   -0.53758,    2.64064,    0.73742,   -0.23889,   -1.42306,
+   2.45853,    0.53382,   -3.86704,   -2.38111,    1.06630,   -2.53741,    2.84142,    0.01277,   -1.36106,    2.59209,   -0.01666,   -0.18922,   -0.74878,
+  -5.16603,    1.10071,   -1.02587,   -0.65498,    1.71131,   -2.87453,    2.04240,    0.47719,   -2.17233,    1.02821,   -0.72409,    0.26941,   -0.00607,
+ -12.43123,    0.30608,    3.68857,    2.96309,    1.55630,   -0.98385,    0.61345,    1.30019,   -1.16512,    0.17263,   -0.45493,    1.32836,    0.42314,
+ -13.59440,   -1.44457,    6.62413,    5.32542,   -0.33035,    1.82161,   -0.62439,    1.11572,    0.18399,   -1.42046,   -0.68679,    1.32077,    0.72066,
+  -9.34379,   -0.82807,    5.77036,    4.03872,   -0.26507,    2.48931,   -1.93166,    0.90906,    0.50134,   -1.90245,    0.14190,    0.16543,    0.41823,
+   0.83365,   -0.15804,    3.34869,    1.48043,    0.92773,    3.18556,   -1.71020,    1.11097,    0.62936,   -1.85763,    0.67584,   -0.91246,    0.15532,
+   3.58294,   -0.49183,    0.86556,   -0.32031,    2.82222,    1.47408,   -0.95646,    0.70039,    0.36673,   -1.66082,    1.52451,   -1.44978,   -0.46740,
+   2.82632,   -0.49331,   -1.30602,   -0.76875,    3.20566,   -0.58260,   -0.59764,    0.17135,   -0.02765,   -1.11568,    0.85042,   -1.88961,   -0.51674,
+   0.73548,    1.22593,   -1.53706,    0.01615,    2.85568,   -1.87749,   -1.63739,    0.51648,   -0.47562,   -0.43747,   -0.16051,   -2.08342,    0.01977,
+  -1.50833,    3.63719,   -0.79696,    1.79341,    2.06264,   -1.83228,   -2.71805,    0.76524,   -0.78004,    0.01473,   -1.01369,   -1.91239,    1.07460,
+  -7.12200,    4.73549,    1.25465,    3.98537,    0.95340,   -0.75236,   -2.93568,   -0.05724,   -0.41303,    0.14127,    0.00988,   -1.37547,    2.44927,
+ -16.05533,    3.48974,    3.98671,    4.36116,    1.44413,    0.49505,   -1.03298,   -0.75418,   -0.42939,    0.38396,    0.34566,   -0.11899,    3.21039,
+ -20.02677,    1.44514,    5.73581,    2.33987,    2.03818,    0.82477,    0.74071,   -0.12468,   -0.08709,    0.82213,    0.27651,    1.10990,    2.00585,
+ -16.45775,   -1.12825,    5.86567,    0.09403,    1.94631,    1.31943,    2.83514,    0.64633,   -0.02941,   -0.10645,   -0.51430,    2.29224,    0.57413,
+ -12.04607,   -5.48223,    4.53004,   -2.18355,   -0.55953,    1.53644,    3.79456,    0.37770,    0.23428,    0.53815,   -0.86937,    1.46214,   -1.51234,
+  -5.08613,   -9.16066,    1.71684,   -2.63321,   -3.00263,   -0.10397,    2.73513,    0.02325,    0.29299,    0.52085,   -1.59154,    0.82633,   -1.91742,
+   5.18221,   -8.78437,   -1.97695,   -2.58549,   -4.10144,   -0.43996,    1.85184,    0.59036,    0.02410,   -0.49077,   -2.30230,   -0.02483,   -1.99048,
+  13.50362,   -5.64577,   -5.21514,   -1.81849,   -3.74644,    0.03933,    1.61908,    0.93424,   -0.06135,   -1.62124,   -2.24047,   -0.76083,   -0.41216,
+  13.04554,   -3.18341,   -6.84896,   -0.21182,   -1.79743,    0.16220,    0.40382,   -0.30199,   -0.28752,   -1.11176,    0.41599,   -0.55934,    0.31086,
+   8.12199,   -0.27922,   -5.83939,    0.04151,    0.92007,    0.47225,    0.06853,   -0.45388,   -0.06354,    0.44807,    1.77689,    0.39586,    0.24896,
+   6.26660,    2.21433,   -4.39397,   -1.09668,    1.60584,   -0.52127,   -1.69893,   -0.80409,    0.01291,    1.24749,    2.14532,    0.70911,   -0.64410,
+   5.37495,    2.85567,   -2.75570,   -1.11314,    1.26053,   -0.62689,   -2.15417,   -1.07443,    0.21577,    1.20760,    1.96039,    0.99756,   -0.54196,
+   2.45326,    2.08029,   -0.18036,   -0.75242,   -0.15587,   -0.52899,   -1.64229,   -1.44529,    0.51506,    2.10977,    1.70373,    0.17180,   -0.51534,
+   7.62515,    4.58346,    0.17525,    1.27628,    0.46337,   -2.61314,   -3.68955,   -1.21973,    0.39711,    0.62896,    1.13624,    0.38422,   -0.05342,
+  28.63886,    8.07296,   -0.43933,    0.56066,   -0.11386,   -2.21033,   -2.54121,    0.73336,    0.59858,   -1.68698,   -1.13978,   -0.28280,   -0.60486,
+  33.05630,    8.21598,   -0.62197,   -0.68171,   -0.86102,   -1.02507,   -0.56918,    1.16628,    0.49057,   -1.98812,   -2.13810,    0.03607,   -0.29178,
+  19.35886,    5.08679,   -0.83876,   -2.11066,   -2.24766,   -0.16469,    1.91195,    0.52344,   -0.19797,   -0.21282,   -1.43950,    0.52516,   -0.51290,
+   1.05964,    2.25556,    1.14766,   -3.16166,   -3.43586,    0.39448,    4.07371,   -0.14690,   -1.13379,    1.46506,   -0.85294,    0.96596,    0.32788,
+ -16.47921,   -0.37634,    4.38852,   -1.72560,   -2.73570,    0.38875,    3.80922,   -1.90740,   -2.70927,    2.43815,   -1.57970,   -0.20923,   -0.03821,
+ -27.79869,   -0.09178,    7.68185,    0.54952,   -2.45296,   -0.10436,    2.05334,   -2.55811,   -3.09893,    2.04949,   -1.75198,   -0.61015,    0.16000,
+ -29.08426,    1.79115,    7.11112,    2.12899,   -2.30041,   -1.32327,   -0.17704,   -1.42358,   -1.49022,    1.48503,   -0.80154,   -2.12979,   -0.42513,
+ -16.58322,    3.19567,    3.07473,    4.25485,   -0.51870,   -1.47221,   -3.10022,   -0.09613,    0.26226,   -0.15524,    0.64078,   -2.16681,   -0.43177,
+   4.19846,    0.74735,   -0.81213,    3.26574,   -0.34897,    0.15579,   -2.97701,    1.42923,    1.36501,   -2.43711,    0.07199,   -1.18562,   -0.30232,
+   7.04351,   -1.93867,   -5.04837,    0.33955,    0.26167,    1.23221,   -1.89839,    1.61539,    2.00777,   -3.04884,   -0.04032,    0.56949,   -0.04057,
+   4.17853,   -3.84825,   -7.78771,   -3.02794,    1.64138,    1.63907,    0.40393,    2.35928,    2.61261,   -2.52924,    1.58602,    1.46927,   -0.19888,
+   2.81883,   -4.79352,   -6.55991,   -4.51712,    5.33195,    2.31995,    1.32159,    1.94338,    1.76991,   -3.49259,    2.76889,    2.00009,    0.38291,
+   3.30576,   -4.24379,   -4.64981,   -4.03144,    8.75425,    3.06653,    1.11204,   -0.49300,   -0.39239,   -4.27045,    3.12231,    1.21235,    0.55950,
+   3.65417,   -1.87101,   -1.98393,   -2.56431,    8.83752,    2.52297,   -0.25835,   -2.24798,   -2.04056,   -2.91090,    4.09639,    0.87054,    0.95408,
+   1.66876,   -1.09840,   -0.87220,   -0.07665,    6.55136,    0.85409,   -2.24251,   -2.67509,   -1.48777,    0.28572,    3.47642,   -1.21795,    0.40747,
+  -1.45178,   -2.86295,    1.16961,    3.82425,    3.41072,    0.44732,   -4.34542,   -2.26515,   -0.07209,    2.41577,    0.68121,   -1.74238,    0.51740,
+  -3.53259,   -4.20449,    3.89107,    6.81943,   -0.81615,   -1.06892,   -4.99324,   -0.61748,    1.44100,    3.22921,   -2.85996,   -1.25922,    0.03446,
+  -4.01745,   -3.92943,    4.96052,    7.61386,   -4.94968,   -2.68325,   -2.57150,    1.85646,    2.97579,    3.07190,   -4.93034,   -1.02494,   -1.25283,
+  -4.55216,   -3.11318,    5.67682,    6.07460,   -6.83842,   -2.52929,    2.05827,    4.41438,    3.24786,    2.01340,   -4.49917,   -0.46858,   -1.99564,
+  -3.57136,   -1.45291,    5.25936,    3.12338,   -5.65845,   -1.49985,    5.85940,    3.96404,    1.07936,    0.50942,   -2.73902,    0.97716,   -1.19766,
+  -1.63437,    0.09951,    2.99573,   -0.20278,   -3.60439,   -0.20191,    6.98759,    1.52803,   -1.94480,    0.52486,    0.13909,    1.53447,    0.45609,
+   0.54424,    2.07960,    0.34316,   -3.33270,   -0.69932,    0.56253,    4.73373,   -0.32955,   -3.64980,    1.72959,    1.96386,    1.03045,    1.12667,
+   1.33631,    2.37755,   -1.87441,   -4.59423,    0.92868,    2.06332,    1.00696,   -1.41669,   -2.16562,    1.62258,    1.16338,   -0.60858,    2.02551,
+   2.16312,    0.69768,   -1.89097,   -4.51454,    0.65384,    2.74707,   -2.52128,   -1.70274,    0.08868,    1.57268,    0.03372,   -2.16884,    2.68824,
+   3.34501,   -2.81341,    0.01824,   -2.27004,   -0.60666,    2.73602,   -3.03987,   -0.84670,    1.59907,    0.48251,   -0.64083,   -1.95746,    1.86315,
+   2.35604,   -5.69370,    1.99409,   -0.58733,   -1.79226,    1.87871,   -1.11883,   -0.60441,    2.69282,   -0.76227,    0.77583,   -0.36741,    0.81042,
+   0.76722,   -5.76291,    2.03095,    0.20537,   -1.45271,    1.17989,    1.10953,   -0.19568,    3.11318,   -0.53355,    0.89315,    0.69284,   -0.69185,
+  -0.64447,   -1.93302,    0.00437,   -0.48542,   -0.01937,    0.06378,    2.04712,   -0.86621,    0.87357,    0.74088,    1.42457,    1.72469,   -1.22077,
+  -1.85081,    3.54681,   -2.57017,   -1.37412,    0.87132,   -1.14356,    1.02429,   -0.88191,   -1.41294,    0.11289,    0.88510,    1.98091,   -1.28046,
+  -1.28988,    9.33262,   -4.66195,   -2.01650,    1.35927,   -1.85700,   -1.28144,   -1.31423,   -2.01477,   -0.89281,   -0.53515,    0.94998,   -1.38835,
+  -1.61862,   12.59195,   -5.25156,   -1.35804,    0.88209,   -1.47722,   -2.90052,   -1.59517,   -2.67036,   -1.73913,   -1.15066,   -0.53627,   -0.70888,
+  -2.16144,   13.02065,   -4.71302,   -0.60855,   -0.72887,   -2.12651,   -3.29260,   -1.59289,   -2.94301,   -1.37787,   -1.51249,   -1.28468,   -0.93711,
+  -2.48996,   10.08527,   -3.78712,    0.59486,   -0.44427,   -1.56782,   -1.47537,   -0.50152,   -1.54615,   -2.45176,   -0.88758,   -0.79308,   -1.51723,
+  -3.37114,    5.22081,   -2.15569,   -0.03493,    0.84560,   -2.38960,    0.53854,    0.20923,    0.41152,   -3.03330,    0.90126,    0.83144,   -0.27947,
+  -3.66135,    1.14919,   -1.63629,    0.48386,    0.79058,   -1.71151,    1.97203,    0.70343,    1.67358,   -1.25116,    1.09744,    0.91605,   -0.61355,
+  -5.64232,   -1.37539,    0.53593,    0.65117,    1.02433,   -0.42820,    2.06895,    1.76367,    0.83862,    0.82207,    1.50662,    0.67475,   -0.27118,
+  -8.30937,   -3.99281,    4.73570,    1.26622,    2.33311,    0.37241,    1.69835,    2.00754,   -1.43329,    1.18721,    0.58491,    0.79643,    1.20485,
+  -6.45230,   -5.18484,    6.87114,    1.22436,    2.44002,    1.83408,    1.04069,    1.26849,   -1.87028,    2.25718,   -0.53987,   -0.57861,    2.23996,
+  -2.87819,   -5.12779,    6.66136,    1.42515,    1.59922,    2.15331,    0.51503,    1.04683,   -2.14922,    2.96057,   -0.44765,   -1.20753,    2.45751,
+   2.55963,   -4.19373,    3.12443,    0.91964,    0.52124,    1.39843,   -0.03413,    0.92559,   -0.64958,    2.35276,   -0.75760,   -0.70371,    1.40571,
+   7.60430,   -3.98521,   -1.01852,   -0.25229,    0.11536,    0.85281,   -0.07025,    0.61516,    1.19230,    0.59239,   -0.08972,    0.04696,    0.24487,
+  10.16228,   -4.02588,   -2.57450,   -0.84913,   -1.32936,   -0.70299,   -0.18757,    0.13752,    2.50470,   -0.35897,    0.59181,    1.03806,   -0.46586,
+   9.22936,   -2.40744,   -4.14243,    0.19192,   -2.82431,   -1.14219,   -0.35091,   -0.02258,    2.76954,   -0.70597,    1.19266,    0.96075,   -1.51163,
+   4.69089,   -1.23410,   -3.08633,    0.74810,   -1.74740,   -1.17424,   -0.94292,    0.43392,    1.91493,   -1.01433,    1.01194,    0.40200,   -1.16780,
+  -0.40179,   -1.07478,   -0.30931,    0.43293,   -0.26438,   -0.77071,   -0.91637,   -0.11969,    0.27763,   -0.48280,   -0.05405,    0.49262,    0.44182,
+  -1.30888,   -0.39406,    0.54836,   -0.45560,   -0.50473,    0.52426,   -0.20709,   -0.85935,    0.01156,    0.10719,   -0.85375,   -0.44064,    0.40290,
+  -2.51316,   -0.30038,    2.24405,    0.04567,   -1.06727,    0.87352,    0.45662,   -0.89379,   -0.20271,    0.05209,   -1.62835,   -1.47750,   -0.11997,
+  -3.21890,   -0.63541,    2.74252,    0.08231,   -1.48443,    0.81144,    1.01067,   -0.51558,   -0.33487,   -0.18637,   -1.36021,   -1.25547,   -0.81002,
+  -3.33979,   -1.25056,    2.29572,    0.46226,   -1.03928,    0.60282,    1.02832,   -0.26747,   -0.32177,   -0.29693,   -1.01177,   -0.69273,   -0.40967,
+   3.83003,   -0.05123,   -2.80682,   -0.09836,   -2.32190,   -2.24635,    0.68250,    0.84364,    0.16497,   -0.45109,   -0.16244,    0.38569,    0.48638,
+   7.31336,    0.54444,   -5.62951,   -0.63746,   -1.75873,   -3.84618,   -0.14022,    1.48527,    0.40388,   -0.55469,    0.84476,    1.14843,    0.98476,
+   4.41448,    0.17238,   -4.92365,   -1.43925,    0.26561,   -2.63845,   -1.26464,    1.46021,    1.35545,    0.86784,    1.07002,    0.40436,    0.32996,
+  -0.36896,   -0.82490,   -2.96900,   -2.93555,    0.88918,   -0.55652,   -1.99362,    0.58595,    1.62409,    1.75007,    0.80250,   -0.38241,   -0.81236,
+  -5.98791,   -2.45780,   -0.14485,   -2.59323,    1.54421,    1.62512,   -2.02780,   -0.97582,    0.64449,    0.93279,    0.13137,   -0.73216,   -1.27276,
+ -10.65201,   -3.67712,    4.00352,   -0.89261,    1.51217,    3.81264,   -0.42121,   -1.77168,   -0.46698,    0.01170,   -0.70166,   -0.99424,   -1.27991,
+ -11.50435,   -4.47949,    5.00780,   -0.00562,    0.40033,    3.46732,    0.00612,   -2.73054,   -2.14512,   -1.20490,   -0.54356,    0.31733,   -0.26649,
+  -6.66438,   -2.96093,    3.70358,    1.25327,   -0.10438,    2.13594,    0.59832,   -2.27570,   -2.58900,   -1.88680,   -0.33843,    1.45507,    1.25116,
+   3.46550,    0.51885,   -0.84675,    0.86400,   -1.94527,   -0.89987,    1.09293,   -0.71812,   -2.02673,   -1.76489,    0.42939,    2.23159,    1.83621,
+   5.75795,    2.37091,   -1.18967,    0.53958,   -1.52071,   -1.74541,    0.81437,    0.14279,   -1.20862,   -0.84202,    0.89451,    1.63559,    1.28508,
+   5.89360,    3.85243,   -0.66250,    0.40889,    0.37277,   -0.73996,    0.74387,    1.29072,    0.38874,    0.70585,    0.82826,    0.10710,    0.19355,
+   5.84407,    3.89011,   -1.06566,   -0.28079,    0.54319,   -0.79726,   -0.10010,    1.10425,    0.99781,    0.80465,    0.30735,   -0.84430,   -0.65162,
+   4.15440,    2.84123,   -0.60409,    0.25845,    0.92166,   -0.71915,   -0.95923,    0.35210,    0.91047,    0.49826,   -0.35318,   -0.94804,   -0.42840,
+   2.92589,    2.50807,    0.45630,    0.74996,    1.16122,    0.25525,   -0.30100,    0.21691,    0.62228,    0.34482,   -0.39568,   -0.85899,   -0.66905,
+   1.59452,    1.46675,    0.42945,    0.58506,    0.71894,    0.06417,   -0.42137,   -0.13321,    0.04791,   -0.18779,   -0.51909,   -0.55866,   -0.39290,
+   0.65455,    0.85218,    0.66460,    0.61843,    0.56772,    0.43565,    0.20614,    0.15363,    0.13117,    0.04038,   -0.14326,   -0.17633,   -0.17159,
+   0.10242,    0.15728,    0.12141,    0.10756,    0.07157,    0.07602,    0.04603,    0.04986,    0.01415,   -0.01059,   -0.07097,   -0.09258,   -0.13962,
+  -0.10807,   -0.14815,   -0.14954,   -0.14938,   -0.17144,   -0.17927,   -0.17881,   -0.17386,   -0.18305,   -0.16965,   -0.14692,   -0.10745,   -0.07029,
+   0.10584,    0.12991,    0.14904,    0.13138,    0.10066,    0.05203,    0.07636,    0.04703,    0.01529,   -0.01553,    0.00099,   -0.01276,    0.01326,
+   0.08425,    0.10493,    0.13542,    0.09583,    0.08445,    0.04019,    0.05727,    0.02121,    0.00529,   -0.05714,   -0.05560,   -0.08407,   -0.08461,
+   0.38736,    0.54152,    0.51499,    0.46106,    0.39383,    0.33696,    0.29355,    0.24304,    0.17114,    0.08912,    0.01619,   -0.04488,   -0.10124,
+   0.57681,    0.80494,    0.77494,    0.73010,    0.67231,    0.60115,    0.51754,    0.42478,    0.32564,    0.22085,    0.11215,    0.00524,   -0.09257,
+   0.26486,    0.36144,    0.34555,    0.35903,    0.38883,    0.39362,    0.36565,    0.33999,    0.33649,    0.32611,    0.27476,    0.19535,    0.12650,
+  -0.21093,   -0.29556,   -0.27549,   -0.22377,   -0.15293,   -0.09083,   -0.04424,    0.00619,    0.07127,    0.13267,    0.16759,    0.17926,    0.18613,
+  -0.24814,   -0.35065,   -0.33638,   -0.33100,   -0.31921,   -0.31383,   -0.29480,   -0.27651,   -0.24126,   -0.20092,   -0.14114,   -0.08350,   -0.02431,
+  -0.28843,   -0.40336,   -0.39127,   -0.38497,   -0.38041,   -0.37398,   -0.35692,   -0.33460,   -0.30211,   -0.25593,   -0.18824,   -0.11065,   -0.03383,
+  -0.40388,   -0.55839,   -0.53149,   -0.49648,   -0.45975,   -0.41103,   -0.35497,   -0.29469,   -0.23840,   -0.17541,   -0.10385,   -0.02526,    0.04476,
+  -0.14369,   -0.19512,   -0.18577,   -0.17187,   -0.16303,   -0.14426,   -0.12706,   -0.11088,   -0.11023,   -0.11001,   -0.11046,   -0.10197,   -0.09659,
+   0.04986,    0.07119,    0.06103,    0.05379,    0.04249,    0.04304,    0.04101,    0.03907,    0.01958,   -0.00486,   -0.03727,   -0.06044,   -0.07913,
+   0.11266,    0.15889,    0.14937,    0.14394,    0.13163,    0.11935,    0.09718,    0.07876,    0.05759,    0.04176,    0.02059,    0.00451,   -0.01118,
+   0.24149,    0.33048,    0.30735,    0.29830,    0.29356,    0.27574,    0.23733,    0.20474,    0.18577,    0.16789,    0.12721,    0.07614,    0.03404,
+   0.30163,    0.39157,    0.35508,    0.31405,    0.30194,    0.24286,    0.17205,    0.09610,    0.07223,    0.04378,    0.00352,   -0.05629,   -0.07777,
+   1.16603,    0.08366,   -0.87697,    0.89533,    1.06093,   -0.67183,   -0.29497,    0.89540,    0.08121,   -0.66815,    0.15620,    0.37290,   -0.36530,
+   9.21120,   -2.82343,   -4.35932,    1.65763,    2.25143,   -1.19674,   -1.27081,    1.41156,   -0.36733,    0.31764,   -0.23390,    0.75131,   -0.77766,
+  15.01506,   -4.99195,   -4.69809,    1.75278,    2.29372,   -1.12755,   -1.63234,    0.76995,   -0.39868,    0.45715,   -0.90487,    0.81855,   -1.40586,
+  14.57721,   -5.55807,   -2.11822,    2.03013,    0.87119,   -0.47299,   -0.55130,    0.30038,   -0.43459,    0.55848,   -0.93575,    0.75292,   -1.14337,
+   9.43524,   -4.92830,    1.26108,    1.52253,   -1.06139,    0.05145,    0.22248,   -0.33880,   -0.28392,    1.37014,   -0.24485,   -0.51274,   -0.57657,
+   1.76494,   -2.84747,    3.64396,   -0.05073,   -3.01963,    0.48404,    0.46461,   -1.16985,   -0.00458,    0.98046,    0.29409,   -1.08060,    0.74265,
+  -5.80262,    1.24152,    4.80939,   -1.44025,   -4.01440,    0.55141,    0.71258,   -1.72037,    0.21627,   -0.15831,    0.42853,   -1.66622,    0.90462,
+ -11.92034,    4.72914,    4.48976,   -1.85948,   -3.03117,    1.10386,    1.54666,   -0.90115,   -0.23760,   -1.21684,    0.45243,   -1.10811,    1.32431,
+ -12.56092,    7.24714,    2.34986,   -1.60707,   -0.33190,    0.82056,    1.54856,    0.30501,   -0.65289,   -1.78371,    0.70834,    0.18954,    1.21825,
+  -6.37655,    7.25386,   -0.86549,   -1.12217,    2.09120,    0.25773,   -0.13453,    0.52964,   -0.37262,   -1.10231,    0.11707,    0.54931,    1.06735,
+  -5.12965,    9.50558,    0.06137,    0.05321,    4.38371,    1.29442,    0.60271,    0.50784,    0.90449,   -1.15440,   -0.76032,    0.79430,    0.42655,
+  -4.91588,   13.99363,    2.21924,    1.72793,    4.22636,    0.88434,    1.02688,   -0.11563,    0.80970,   -0.95908,   -1.16542,    0.09138,   -0.25571,
+  -0.49899,   13.50086,    0.45976,    2.08231,    2.57830,   -2.23514,    1.04862,   -0.80894,    0.50177,   -0.39439,   -1.32026,   -0.65541,   -0.80816,
+   2.50879,    7.05446,   -1.60173,    0.75048,    0.44241,   -3.20741,    1.17587,   -0.86417,    0.22232,   -0.48021,   -1.41610,    0.34684,   -1.20772,
+   3.91430,   -0.11253,   -2.98095,    0.26236,   -1.73557,   -2.69258,    0.81257,   -0.76090,   -0.14933,   -0.11267,   -0.33433,    0.35107,   -2.20689,
+   1.52597,   -8.08723,   -2.88646,   -0.01534,   -2.71940,   -0.59833,    0.44367,   -0.36338,   -0.70591,    0.03754,    1.07923,    1.28916,   -0.98862,
+  -3.19241,  -13.07944,   -1.45363,   -1.00014,   -3.23551,    1.02494,   -0.75324,   -0.25386,   -0.06039,    0.07222,    2.70142,    0.74582,   -0.42609,
+  -9.37112,  -14.01679,    1.58559,   -0.99353,   -2.70695,    2.30827,   -2.86112,    0.00562,    1.23688,   -0.28028,    3.36462,    0.27533,    0.38289,
+ -15.65554,   -9.69857,    5.53239,   -0.88511,   -0.60628,    4.34134,   -2.31666,    1.16146,    2.59606,    0.06560,    2.47435,    0.14085,    1.17497,
+ -17.38906,   -2.45135,    6.80561,   -2.23381,    0.29775,    4.22087,   -1.59670,    1.92207,    1.32889,    0.40569,    1.28392,   -1.24831,    1.78650,
+ -12.32829,   -0.45323,    4.02706,   -3.48876,    0.91028,    2.64065,   -0.40734,    1.09273,   -0.07504,    2.04266,   -0.33405,   -1.75580,    2.78248,
+  -5.48976,   -1.31076,    1.13817,   -4.10307,    1.68517,    2.64528,    0.61998,    0.31068,   -0.72026,    3.38016,   -1.75093,   -1.89652,    2.49156,
+   4.86518,    0.46760,   -4.62680,   -5.68874,   -0.53312,    1.27476,    0.91930,   -0.88800,   -1.38324,    3.65460,   -1.17333,   -1.46998,    1.47362,
+  23.59956,    1.54103,  -10.57325,   -6.55460,   -1.18614,   -3.24342,    0.75381,   -2.71058,   -3.01869,    2.41544,   -1.13300,   -0.30150,    0.60880,
+  32.33330,    2.70337,  -11.78239,   -3.99596,   -1.67392,   -5.54388,    0.69011,   -2.90009,   -3.08534,    0.37034,   -0.66760,    0.15104,   -1.36276,
+  30.06627,    3.26370,   -7.79426,   -0.60895,   -2.00669,   -5.92217,    1.14711,   -1.47520,   -1.91072,   -1.88113,    0.17036,    1.92276,   -2.50109,
+  39.37696,   -4.69698,   -0.04885,    4.63564,   -2.39147,   -3.19453,   -0.69839,    1.31964,    1.29003,   -2.78135,   -0.00281,    2.12537,   -1.31877,
+  28.62589,   -7.60295,    5.43086,    7.42344,   -1.12472,    0.32970,   -1.36870,    2.95929,    2.25571,   -2.58043,   -0.04934,    0.91945,   -0.31753,
+   8.88008,   -7.52631,    8.67690,    8.57319,    0.51966,    2.83565,   -1.92872,    2.09044,    2.85297,   -1.26876,   -0.45494,   -0.01347,    0.57816,
+ -11.64625,   -4.59992,    7.86869,    6.87035,    1.68262,    3.67577,   -1.31846,    1.67403,    2.82795,   -0.42186,   -0.61134,   -0.46697,    0.59609
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // ASR_F_GOLDEN_HPP
\ No newline at end of file
diff --git a/tests/use_case/ad/AdTests.cc b/tests/use_case/ad/AdTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/ad/AdTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/ad/InferenceTestAD.cc b/tests/use_case/ad/InferenceTestAD.cc
new file mode 100644
index 0000000..b87699d
--- /dev/null
+++ b/tests/use_case/ad/InferenceTestAD.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <catch.hpp>
+#include <random>
+
+#include "AdModel.hpp"
+#include "AdGoldenInput.hpp"
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+
+#ifndef AD_IN_FEATURE_VEC_DATA_SIZE
+#define AD_IN_FEATURE_VEC_DATA_SIZE (1024)
+#endif /* AD_IN_FEATURE_VEC_DATA_SIZE */
+
+bool RunInference(arm::app::Model& model, const int8_t vec[])
+{
+    TfLiteTensor *inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
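+    /* Copy no more than the smaller of the input tensor size and the golden feature vector size. */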
+    const size_t copySz = inputTensor->bytes < AD_IN_FEATURE_VEC_DATA_SIZE ? inputTensor->bytes : AD_IN_FEATURE_VEC_DATA_SIZE;
+
+    memcpy(inputTensor->data.data, vec, copySz);
+
+    return model.RunInference();
+}
+
+bool RunInferenceRandom(arm::app::Model& model)
+{
+    TfLiteTensor *inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    std::random_device rndDevice;
+    std::mt19937 mersenneGen{rndDevice()};
+    std::uniform_int_distribution<short> dist{-128, 127};
+
+    auto gen = [&dist, &mersenneGen]() {
+        return dist(mersenneGen);
+    };
+
+    std::vector<int8_t> randomInput(inputTensor->bytes);
+    std::generate(std::begin(randomInput), std::end(randomInput), gen);
+
+    REQUIRE(RunInference(model, randomInput.data()));
+    return true;
+}
+
+template <typename T>
+void TestInference(const T *input_goldenFV, const T *output_goldenFV, arm::app::Model& model)
+{
+    REQUIRE(RunInference(model, (int8_t*)input_goldenFV));
+
+    TfLiteTensor *outputTensor = model.GetOutputTensor(0);
+
+    REQUIRE(outputTensor);
+    REQUIRE(outputTensor->bytes == AD_OUT_FEATURE_VEC_DATA_SIZE);
+    auto tensorData = tflite::GetTensorData<T>(outputTensor);
+    REQUIRE(tensorData);
+
+    for (size_t i = 0; i < outputTensor->bytes; i++)
+    {
+        REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+    }
+}
+
+TEST_CASE("Running random inference with TensorFlow Lite Micro and AdModel Int8", "[AD][.]")
+{
+    arm::app::AdModel model{};
+
+    REQUIRE_FALSE(model.IsInited());
+    REQUIRE(model.Init());
+    REQUIRE(model.IsInited());
+
+    REQUIRE(RunInferenceRandom(model));
+}
+
+TEST_CASE("Running golden vector inference with TensorFlow Lite Micro and AdModel Int8", "[AD][.]")
+{
+    arm::app::AdModel model{};
+
+    REQUIRE_FALSE(model.IsInited());
+    REQUIRE(model.Init());
+    REQUIRE(model.IsInited());
+
+    TestInference(ad_golden_input, ad_golden_out, model);
+}
\ No newline at end of file
diff --git a/tests/use_case/ad/MelSpecTests.cc b/tests/use_case/ad/MelSpecTests.cc
new file mode 100644
index 0000000..affc67a
--- /dev/null
+++ b/tests/use_case/ad/MelSpecTests.cc
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AdMelSpectrogram.hpp"
+#include <limits>
+#include <algorithm>
+#include <catch.hpp>
+
+/* First 1024 samples from test wav. */
+const std::vector<int16_t> testWav1 = std::vector<int16_t>{
+    490,495,445,403,390,259,126,146,
+    175,134,232,243,166,145,123,33,
+    -61,-4,8,-115,-281,-292,-210,-133,
+    -98,-142,-229,-356,-415,-438,-443,-396,
+    -377,-297,-85,122,172,16,-197,-351,
+    -484,-408,-378,-405,-399,-335,-180,-141,
+    -124,-108,-46,37,141,234,264,218,
+    147,164,132,111,125,73,2,36,
+    107,113,93,6,-40,-153,-273,-282,
+    -291,-298,-389,-446,-394,-324,-333,-385,
+    -485,-548,-690,-718,-660,-704,-690,-601,
+    -549,-641,-637,-513,-469,-366,-227,-269,
+    -348,-408,-486,-570,-638,-666,-730,-746,
+    -710,-634,-543,-461,-281,-156,-130,-126,
+    -144,-118,-23,103,132,37,-69,-86,
+    -234,-360,-366,-330,-248,-268,-282,-169,
+    -190,-152,-151,-145,-133,-205,-263,-397,
+    -558,-656,-668,-718,-779,-828,-856,-817,
+    -761,-759,-722,-772,-873,-983,-962,-897,
+    -843,-788,-750,-677,-555,-447,-373,-218,
+    -182,-230,-204,-174,-144,-127,-231,-199,
+    -127,-194,-250,-183,-189,-254,-249,-337,
+    -417,-459,-513,-505,-481,-402,-344,-284,
+    -281,-441,-450,-423,-327,-119,102,197,
+    208,173,102,103,165,131,15,75,
+    283,365,322,391,303,287,372,406,
+    493,577,640,681,577,498,524,511,
+    476,425,380,315,337,339,408,603,
+    749,745,672,654,588,520,523,544,
+    557,632,636,565,491,413,368,252,
+    136,33,1,-26,-152,-258,-98,18,
+    1,-18,-99,-117,-109,-228,-295,-349,
+    -334,-337,-441,-373,-279,-202,-204,-219,
+    -119,149,410,489,564,623,683,642,
+    707,872,932,862,833,862,894,784,
+    637,559,507,394,306,420,510,484,
+    519,526,599,789,959,1052,1063,1030,
+    860,697,603,530,475,463,468,461,
+    609,641,534,482,435,329,239,216,
+    185,82,88,106,60,26,-43,-127,
+    -220,-262,-317,-259,-172,-175,-271,-217,
+    -196,-164,8,144,150,134,60,13,
+    57,-58,-115,-171,-282,-310,-298,-106,
+    42,-101,-172,-181,-249,-326,-262,-132,
+    -56,-82,-71,-88,-196,-325,-426,-413,
+    -411,-317,-191,-172,-195,-292,-328,-191,
+    -88,-60,21,-63,-175,-135,-64,-83,
+    -163,-279,-440,-536,-403,-308,-236,-132,
+    -95,-69,-73,-21,13,133,185,251,
+    238,88,-66,-134,-175,-231,-219,-151,
+    -213,-328,-340,-374,-459,-601,-556,-395,
+    -248,-205,-174,-227,-402,-493,-464,-483,
+    -588,-564,-463,-493,-505,-416,-378,-313,
+    -215,-192,-192,-59,18,-40,-66,-60,
+    -143,-263,-213,-224,-265,-249,-237,-227,
+    -418,-504,-573,-699,-679,-577,-500,-570,
+    -538,-416,-444,-415,-294,-300,-427,-423,
+    -299,-279,-279,-187,-137,-123,60,230,
+    227,277,356,413,440,418,477,594,
+    697,729,586,561,653,570,590,628,
+    497,357,366,470,591,576,458,439,
+    417,431,447,349,304,241,294,406,
+    484,516,587,598,566,465,380,347,
+    316,391,429,409,216,69,57,76,
+    150,101,93,113,90,41,-28,-15,
+    -2,47,208,261,333,362,239,301,
+    422,431,426,434,482,510,480,407,
+    244,53,-108,-234,-275,-302,-304,-207,
+    -117,-181,-214,-248,-203,-52,5,-14,
+    24,-9,-154,-186,-82,-23,-62,-165,
+    -174,-190,-368,-414,-316,-301,-180,41,
+    116,214,319,408,416,157,-100,-40,
+    118,248,310,301,302,387,458,414,
+    301,261,233,111,33,39,65,56,
+    9,-92,-87,-98,-172,-196,-186,-18,
+    -14,-57,-111,-178,-278,-304,-358,-359,
+    -362,-464,-528,-400,-355,-284,-189,-240,
+    -253,-216,-319,-490,-621,-684,-758,-860,
+    -883,-877,-847,-787,-766,-852,-727,-481,
+    -339,-282,-266,-405,-414,-286,-225,-204,
+    -330,-488,-412,-292,-254,-290,-372,-436,
+    -545,-564,-413,-360,-344,-389,-430,-340,
+    -248,-271,-343,-383,-414,-409,-272,-223,
+    -215,-123,-10,-4,-6,-27,-11,78,
+    169,226,139,-19,16,100,54,-75,
+    -117,-103,-77,-277,-598,-644,-602,-509,
+    -396,-232,-227,-208,-153,-146,-205,-223,
+    -108,-55,-26,-8,-42,-178,-298,-320,
+    -254,-146,-135,-262,-370,-331,-337,-394,
+    -265,-53,136,309,354,312,345,303,
+    275,338,287,269,346,329,319,327,
+    199,118,251,296,243,111,90,150,
+    104,163,274,278,242,135,93,138,
+    5,-154,-206,-270,-334,-356,-251,-96,
+    -78,-123,-80,-93,-160,-217,-214,-154,
+    -42,128,228,243,307,465,492,425,
+    381,382,425,530,518,484,560,654,
+    659,663,723,717,672,652,542,507,
+    471,468,579,573,459,313,262,310,
+    284,235,331,361,275,207,104,35,
+    35,89,136,192,218,161,89,64,
+    116,175,159,95,96,242,350,248,
+    170,64,-35,-136,-202,-271,-307,-290,
+    -257,-219,-206,-185,-216,-213,-184,-135,
+    -165,-141,-25,-31,-28,-98,-247,-162,
+    10,35,-16,-113,-139,-127,-58,-100,
+    -166,-320,-406,-462,-604,-594,-650,-538,
+    -427,-365,-196,-117,-120,-102,-66,-122,
+    -211,-235,-202,-135,-40,-10,-38,-150,
+    -286,-223,-50,93,149,86,184,128,
+    113,163,13,-53,-135,-100,-72,-75,
+    -73,-118,-150,-197,-224,-131,-59,-109,
+    -92,-129,-189,-220,-166,-173,-114,-8,
+    26,-27,-38,50,109,143,161,209,
+    266,289,384,397,312,203,5,-64,
+    -14,6,56,67,19,-43,-112,-46,
+    -74,-101,-83,-115,-142,-207,-274,-292,
+    -299,-236,-181,-188,-48,60,6,-76,
+    -8,115,188,260,236,143,44,-30,
+    -17,31,37,-16,-28,87,210,276,
+    372,365,302,270,137,-8,-142,-246,
+    -279,-259,-203,-241,-278,-254,-245,-177,
+    -77,-8,-47,-159,-295,-412,-414,-414,
+    -566,-533,-255,-82,-10,222,358,336,
+    355,360,303,237,267,224,244,434,
+    422,372,404,464,559,538,446,294,
+    217,60,-82,-150,-144,-162,-250,-263,
+    -222,-148,-81,-134,-134,-106,-27,-71,
+};
+
+/* Golden log mel spec output for test wav. */
+const std::vector<float> testWavMelSpec {
+        -8.601085, -10.563560, -13.791912, -12.356619, -16.892878,
+        -16.913876, -15.695299, -21.848980, -21.193371, -18.772688,
+        -21.795116, -20.008236, -22.413673, -25.162649, -24.091856,
+        -24.936411, -19.341146, -23.534576, -29.052885, -26.562546,
+        -25.046455, -29.586889, -30.115177, -32.281334, -29.806450,
+        -30.398304, -26.682615, -27.397421, -31.224312, -31.033779,
+        -36.314369, -29.530331, -28.428139, -30.097546, -34.101303,
+        -32.660480, -34.229076, -34.668293, -35.140759, -34.104649,
+        -34.141472, -36.514408, -37.655891, -33.590931, -40.532566,
+        -39.105091, -39.600319, -40.239834, -41.356224, -41.103714,
+        -39.861557, -41.827553, -41.275696, -42.203575, -42.689217,
+        -46.495552, -46.704731, -45.560322, -47.423828, -50.672031,
+        -51.387669, -53.410839, -54.899536, -55.807552,
+};
+
+
+arm::app::audio::AdMelSpectrogram GetMelSpecInstance() {
+    int frameLenSamples = 1024;
+    return arm::app::audio::AdMelSpectrogram(frameLenSamples);
+}
+
+template <class T>
+void TestQuantisedMelSpec() {
+    float quantScale = 0.1410219967365265;
+    int quantOffset = 11;
+    std::vector<T> melSpecOutput = GetMelSpecInstance().MelSpecComputeQuant<T>(testWav1, quantScale, quantOffset);
+
+    long min_val = std::numeric_limits<T>::min();
+    long max_val = std::numeric_limits<T>::max();
+
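+    /* Quantise each golden value as round(value / scale) + offset, clamp it to the type's range, and allow a 1 LSB margin. */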
+    for (size_t i = 0; i < testWavMelSpec.size(); i++){
+        long TestWavMelSpec = (std::lround((testWavMelSpec[i] / quantScale) + quantOffset));
+        T quantizedTestWavMelSpec = static_cast<T>(std::max(min_val, std::min(TestWavMelSpec, max_val)));
+
+        REQUIRE(quantizedTestWavMelSpec  == Approx(melSpecOutput[i]).margin(1));
+    }
+}
+
+template void TestQuantisedMelSpec<int8_t>();
+template void TestQuantisedMelSpec<uint8_t>();
+template void TestQuantisedMelSpec<int16_t>();
+
+TEST_CASE("Mel Spec calculation") {
+
+    hal_platform    platform;
+    data_acq_module dataAcq;
+    data_psn_module dataPsn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform */
+    hal_init(&platform, &dataAcq, &dataPsn, &timer);
+    hal_platform_init(&platform);
+
+    SECTION("FP32") {
+        auto melSpecOutput = GetMelSpecInstance().ComputeMelSpec(testWav1);
+        REQUIRE_THAT( melSpecOutput, Catch::Approx( testWavMelSpec ).margin(0.1) );
+    }
+
+    SECTION("int8_t") {
+        TestQuantisedMelSpec<int8_t>();
+    }
+
+    SECTION("uint8_t") {
+        TestQuantisedMelSpec<uint8_t>();
+    }
+
+    SECTION("int16_t") {
+        TestQuantisedMelSpec<int16_t>();
+    }
+}
diff --git a/tests/use_case/ad/PostProcessTests.cc b/tests/use_case/ad/PostProcessTests.cc
new file mode 100644
index 0000000..62fa9e7
--- /dev/null
+++ b/tests/use_case/ad/PostProcessTests.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AdPostProcessing.hpp"
+#include <catch.hpp>
+
+TEST_CASE("Softmax_vector") {
+
+    std::vector<float> testVec = {1, 2, 3, 4, 1, 2, 3};
+    arm::app::Softmax(testVec);
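+    /* Expected values are the softmax probabilities exp(x_i) / sum_j exp(x_j) of the input {1, 2, 3, 4, 1, 2, 3}. */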
+    CHECK((testVec[0] - 0.024) == Approx(0.0).margin(0.001));
+    CHECK((testVec[1] - 0.064) == Approx(0.0).margin(0.001));
+    CHECK((testVec[2] - 0.175) == Approx(0.0).margin(0.001));
+    CHECK((testVec[3] - 0.475) == Approx(0.0).margin(0.001));
+    CHECK((testVec[4] - 0.024) == Approx(0.0).margin(0.001));
+    CHECK((testVec[5] - 0.064) == Approx(0.0).margin(0.001));
+    CHECK((testVec[6] - 0.175) == Approx(0.0).margin(0.001));
+}
+
+TEST_CASE("Output machine index") {
+
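+    /* File names carry a machine id (00, 02, 4, 6) which maps to output indices 0-3; any other id is invalid (-1). */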
+    auto index = arm::app::OutputIndexFromFileName("test_id_00.wav");
+    CHECK(index == 0);
+
+    auto index1 = arm::app::OutputIndexFromFileName("test_id_02.wav");
+    CHECK(index1 == 1);
+
+    auto index2 = arm::app::OutputIndexFromFileName("test_id_4.wav");
+    CHECK(index2 == 2);
+
+    auto index3 = arm::app::OutputIndexFromFileName("test_id_6.wav");
+    CHECK(index3 == 3);
+
+    auto index4 = arm::app::OutputIndexFromFileName("test_id_id_00.wav");
+    CHECK(index4 == -1);
+
+    auto index5 = arm::app::OutputIndexFromFileName("test_id_7.wav");
+    CHECK(index5 == -1);
+}
\ No newline at end of file
diff --git a/tests/use_case/asr/AsrClassifierTests.cc b/tests/use_case/asr/AsrClassifierTests.cc
new file mode 100644
index 0000000..7c71912
--- /dev/null
+++ b/tests/use_case/asr/AsrClassifierTests.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AsrClassifier.hpp"
+#include "Wav2LetterModel.hpp"
+
+#include <catch.hpp>
+
+TEST_CASE("Test invalid classifier")
+{
+    TfLiteTensor* outputTens = nullptr;
+    std::vector <arm::app::ClassificationResult> resultVec;
+    arm::app::AsrClassifier classifier;
+
+    REQUIRE(!classifier.GetClassificationResults(outputTens, resultVec, {}, 1));
+}
+
+
+TEST_CASE("Test valid classifier UINT8") {
+    const int dimArray[] = {4, 1, 1, 246, 29};
+    std::vector <std::string> labels(29);
+    std::vector <uint8_t> outputVec(7134);
+    TfLiteIntArray* dims= tflite::testing::IntArrayFromInts(dimArray);
+    TfLiteTensor tfTensor = tflite::testing::CreateQuantizedTensor(
+                                outputVec.data(), dims, 1, 0, "test");
+    TfLiteTensor* outputTensor = &tfTensor;
+    std::vector <arm::app::ClassificationResult> resultVec;
+    arm::app::AsrClassifier classifier;
+
+    REQUIRE(classifier.GetClassificationResults(outputTensor, resultVec, labels, 1));
+    REQUIRE(246 == resultVec.size());
+}
+
+
+TEST_CASE("Get classification results") {
+    const int dimArray[] = {4, 1, 1, 10, 15};
+    std::vector <std::string> labels(15);
+    std::vector<uint8_t> outputVec(150, static_cast<uint8_t>(1));
+    TfLiteIntArray* dims= tflite::testing::IntArrayFromInts(dimArray);
+    TfLiteTensor tfTensor = tflite::testing::CreateQuantizedTensor(
+                                outputVec.data(), dims, 1, 0, "test");
+    TfLiteTensor* outputTensor = &tfTensor;
+
+    std::vector <arm::app::ClassificationResult> resultVec(10);
+
+    /* Plant the expected winners: for each of the 10 output rows, the (column, value) entry with the highest value decides the expected label index. */
+    std::vector<std::pair<uint32_t, std::pair<uint32_t, uint8_t>>> selectedResults {
+        {0, {3, 23}},
+        {0, {9, 15}},
+        {1, {5, 24}},
+        {1, {7, 4}},
+        {2, {9, 5}},
+        {3, {8, 6}},
+        {4, {13, 10}},
+        {4, {6, 18}},
+        {5, {3, 15}},
+        {5, {4, 115}},
+        {6, {6, 25}},
+        {7, {1, 7}},
+        {8, {11, 9}},
+        {9, {1, 10}}
+    };
+
+    const uint32_t nCols = outputTensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx];
+    for (size_t i = 0; i < selectedResults.size(); ++i) {
+        uint32_t rIndex = selectedResults[i].first;
+        uint32_t cIndex = selectedResults[i].second.first;
+        uint8_t   value = selectedResults[i].second.second;
+        outputVec[rIndex * nCols + cIndex] = value;
+    }
+
+    arm::app::AsrClassifier classifier;
+
+    REQUIRE(classifier.GetClassificationResults(outputTensor, resultVec, labels, 1));
+    REQUIRE(resultVec[0].m_labelIdx == 3);
+    REQUIRE(resultVec[1].m_labelIdx == 5);
+    REQUIRE(resultVec[2].m_labelIdx == 9);
+    REQUIRE(resultVec[3].m_labelIdx == 8);
+    REQUIRE(resultVec[4].m_labelIdx == 6);
+    REQUIRE(resultVec[5].m_labelIdx == 4);
+    REQUIRE(resultVec[6].m_labelIdx == 6);
+    REQUIRE(resultVec[7].m_labelIdx == 1);
+    REQUIRE(resultVec[8].m_labelIdx == 11);
+    REQUIRE(resultVec[9].m_labelIdx == 1);
+}
diff --git a/tests/use_case/asr/AsrFeaturesTests.cc b/tests/use_case/asr/AsrFeaturesTests.cc
new file mode 100644
index 0000000..9401f40
--- /dev/null
+++ b/tests/use_case/asr/AsrFeaturesTests.cc
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DataStructures.hpp"
+#include "AsrGoldenFeatures.hpp"
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "Wav2LetterPreprocess.hpp"
+
+#include <catch.hpp>
+#include <random>
+
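+/* Helper subclass exposing the protected Preprocess methods under test. */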
+class TestPreprocess : public arm::app::audio::asr::Preprocess {
+public:
+    TestPreprocess()
+    : arm::app::audio::asr::Preprocess(0,0,0,0)
+    {}
+
+    bool ComputeDeltas(arm::app::Array2d<float>& mfcc,
+                       arm::app::Array2d<float>& delta1,
+                       arm::app::Array2d<float>& delta2)
+    {
+        return this->_ComputeDeltas(mfcc, delta1, delta2);
+    }
+
+    float GetMean(arm::app::Array2d<float>& vec)
+    {
+        return this->_GetMean(vec);
+    }
+
+    float GetStdDev(arm::app::Array2d<float>& vec, const float mean)
+    {
+       return this->_GetStdDev(vec, mean);
+    }
+
+    void NormaliseVec(arm::app::Array2d<float>& vec)
+    {
+        return this->_NormaliseVec(vec);
+    }
+};
+
+template<class T>
+void CheckOutputs(const std::vector<T> goldenOutput, std::vector<T> output)
+{
+    const size_t goldenSize = goldenOutput.size();
+    const size_t realSize = output.size();
+
+    REQUIRE(realSize == goldenSize);
+    REQUIRE_THAT(output, Catch::Approx( goldenOutput ).margin(0.0001));
+}
+template void CheckOutputs<float>(const std::vector<float> goldenOutput, std::vector<float> output);
+
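+/* Copy a flat, time-major feature array into a [feature][time] buffer. */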
+void populateBuffer(const float* input, size_t size, size_t numMfccFeats, std::vector<std::vector<float>>& buf)
+{
+    size_t time = 0;
+    for (size_t i = 0; i < size; ++i) {
+        if (i > 0 && i % numMfccFeats == 0) {
+            ++time;
+        }
+        float featureValue = *(input + i);
+        buf[i % numMfccFeats][time] = featureValue;
+    }
+}
+
+void populateArray2dWithVectorOfVector(std::vector<std::vector<float>> vec, arm::app::Array2d<float>& buf)
+{
+    for (size_t i = 0; i < vec.size(); ++i) {
+        for (size_t j = 0; j < vec[i].size(); ++j) {
+            buf(i, j) = vec[i][j];
+        }
+    }
+}
+
+TEST_CASE("Floating point asr features calculation", "[ASR]")
+{
+    TestPreprocess tp;
+
+    SECTION("First and second diff")
+    {
+        constexpr uint32_t numMfccFeats = 13;
+        constexpr uint32_t numFeatVectors = 296;
+
+        arm::app::Array2d<float> mfccBuf(numMfccFeats, numFeatVectors);
+        arm::app::Array2d<float> delta1Buf(numMfccFeats, numFeatVectors);
+        arm::app::Array2d<float> delta2Buf(numMfccFeats, numFeatVectors);
+
+        std::vector<std::vector<float>> goldenMfccBuf(numMfccFeats, std::vector<float>(numFeatVectors));
+        std::vector<std::vector<float>> goldenDelta1Buf(numMfccFeats, std::vector<float>(numFeatVectors));
+        std::vector<std::vector<float>> goldenDelta2Buf(numMfccFeats, std::vector<float>(numFeatVectors));
+
+        populateBuffer(golden_asr_mfcc, golden_asr_mfcc_len, numMfccFeats, goldenMfccBuf);
+        populateBuffer(golden_diff1_features, golden_diff1_len, numMfccFeats, goldenDelta1Buf);
+        populateBuffer(golden_diff2_features, golden_diff2_len, numMfccFeats, goldenDelta2Buf);
+
+        populateArray2dWithVectorOfVector(goldenMfccBuf, mfccBuf);
+        std::fill(delta1Buf.begin(), delta1Buf.end(), 0.f);
+        std::fill(delta2Buf.begin(), delta2Buf.end(), 0.f);
+
+        tp.ComputeDeltas(mfccBuf, delta1Buf, delta2Buf);
+
+        /* The first and last 4 values differ because padding is applied after the deltas are calculated. */
+        for (size_t i = 0; i < numMfccFeats; ++i) {
+            const float* start_goldenDelta1Buf = goldenDelta1Buf[i].data() + 4;
+            const float* start_delta1 = delta1Buf.begin() + i * delta1Buf.size(1) + 4;
+            std::vector<float> goldenDataDelta1(start_goldenDelta1Buf, start_goldenDelta1Buf + numFeatVectors - 8);
+            std::vector<float> tensorDataDelta1(start_delta1, start_delta1 + numFeatVectors - 8);
+
+            CheckOutputs<float>(goldenDataDelta1,tensorDataDelta1);
+
+            const float* start_goldenDelta2Buf = goldenDelta2Buf[i].data() + 4;
+            const float* start_delta2 = delta2Buf.begin() + i * delta2Buf.size(1) + 4;
+            std::vector<float> goldenDataDelta2(start_goldenDelta2Buf, start_goldenDelta2Buf + numFeatVectors - 8);
+            std::vector<float> tensorDataDelta2(start_delta2, start_delta2 + numFeatVectors - 8);
+
+            CheckOutputs<float>(goldenDataDelta2,tensorDataDelta2);
+        }
+
+    }
+
+    SECTION("Mean")
+    {
+        std::vector<std::vector<float>> mean1vec{{1, 2},
+                                                {-1, -2}};
+        arm::app::Array2d<float> mean1(2,2); /* {{1, 2},{-1, -2}} */
+        populateArray2dWithVectorOfVector(mean1vec, mean1);
+        REQUIRE(0 == Approx(tp.GetMean(mean1)));
+
+        arm::app::Array2d<float> mean2(2, 2);
+        std::fill(mean2.begin(), mean2.end(), 0.f);
+        REQUIRE(0 == Approx(tp.GetMean(mean2)));
+
+        arm::app::Array2d<float> mean3(3,3);
+        std::fill(mean3.begin(), mean3.end(), 1.f);
+        REQUIRE(1 == Approx(tp.GetMean(mean3)));
+    }
+
+    SECTION("Std")
+    {
+        arm::app::Array2d<float> std1(2, 2);
+        std::fill(std1.begin(), std1.end(), 0.f); /* {{0, 0}, {0, 0}} */
+        REQUIRE(0 == Approx(tp.GetStdDev(std1, 0)));
+
+        std::vector<std::vector<float>> std2vec{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}};
+        arm::app::Array2d<float> std2(2,5);
+        populateArray2dWithVectorOfVector(std2vec, std2);
+        const float mean = tp.GetMean(std2);
+        REQUIRE(2.872281323 == Approx(tp.GetStdDev(std2, mean)));
+
+        arm::app::Array2d<float> std3(2,2);
+        std::fill(std3.begin(), std3.end(), 1.f); /* std3{{1, 1}, {1, 1}}; */
+        REQUIRE(0 == Approx(tp.GetStdDev(std3, 1)));
+    }
+
+    SECTION("Norm") {
+        auto checker = [&](arm::app::Array2d<float>& d, std::vector<float>& g) {
+            tp.NormaliseVec(d);
+            std::vector<float> d_vec(d.begin(), d.end());
+            REQUIRE_THAT(g, Catch::Approx(d_vec));
+        };
+
+        std::vector<std::vector<float>> norm0vec{{1, 1}, {1, 1}};
+        std::vector<float> goldenNorm0 {0, 0, 0, 0};
+        arm::app::Array2d<float> norm0(2, 2);
+        populateArray2dWithVectorOfVector(norm0vec, norm0);
+        checker(norm0, goldenNorm0);
+
+        std::vector<std::vector<float>> norm1vec{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}};
+        std::vector<float> goldenNorm1 {
+            -1.218543592, -0.87038828, -0.522232968, -0.174077656, 0.174077656,
+             0.522232968,  0.87038828,  1.218543592,  1.566698904, -1.566698904};
+        arm::app::Array2d<float> norm1(2, 5);
+        populateArray2dWithVectorOfVector(norm1vec, norm1);
+        checker(norm1, goldenNorm1);
+    }
+}
diff --git a/tests/use_case/asr/AsrTests.cc b/tests/use_case/asr/AsrTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/asr/AsrTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/asr/InferenceTestWav2Letter.cc b/tests/use_case/asr/InferenceTestWav2Letter.cc
new file mode 100644
index 0000000..1fa4092
--- /dev/null
+++ b/tests/use_case/asr/InferenceTestWav2Letter.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "Wav2LetterModel.hpp"
+#include "TestData_asr.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+bool RunInference(arm::app::Model& model, const int8_t vec[], const size_t copySz)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    memcpy(inputTensor->data.data, vec, copySz);
+
+    return model.RunInference();
+}
+
+bool RunInferenceRandom(arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    std::random_device rndDevice;
+    std::mt19937 mersenneGen{rndDevice()};
+    std::uniform_int_distribution<short> dist {-128, 127};
+
+    auto gen = [&dist, &mersenneGen](){
+                   return dist(mersenneGen);
+               };
+
+    std::vector<int8_t> randomAudio(inputTensor->bytes);
+    std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+
+    REQUIRE(RunInference(model, randomAudio.data(), inputTensor->bytes));
+    return true;
+}
+
+/* Hidden by default: a Wav2LetterModel taken straight from the ML Model Zoo (i.e. not Vela optimised) will fail this test. */
+TEST_CASE("Running random inference with TensorFlow Lite Micro and Wav2LetterModel Int8", "[Wav2Letter][.]")
+{
+    arm::app::Wav2LetterModel model{};
+
+    REQUIRE_FALSE(model.IsInited());
+    REQUIRE(model.Init());
+    REQUIRE(model.IsInited());
+
+    REQUIRE(RunInferenceRandom(model));
+}
+
+template<typename T>
+void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    REQUIRE(RunInference(model, input_goldenFV, inputTensor->bytes));
+
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+    REQUIRE(outputTensor);
+    REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+    auto tensorData = tflite::GetTensorData<T>(outputTensor);
+    REQUIRE(tensorData);
+
+    for (size_t i = 0; i < outputTensor->bytes; i++) {
+        REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+    }
+}
+
+TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+{
+    for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+        auto input_goldenFV = get_ifm_data_array(i);
+        auto output_goldenFV = get_ofm_data_array(i);
+
+        DYNAMIC_SECTION("Executing inference with re-init")
+        {
+            arm::app::Wav2LetterModel model{};
+
+            REQUIRE_FALSE(model.IsInited());
+            REQUIRE(model.Init());
+            REQUIRE(model.IsInited());
+
+            TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+
+        }
+    }
+}
\ No newline at end of file
diff --git a/tests/use_case/asr/MfccTests.cc b/tests/use_case/asr/MfccTests.cc
new file mode 100644
index 0000000..c70e53e
--- /dev/null
+++ b/tests/use_case/asr/MfccTests.cc
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterMfcc.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+/* First 512 samples from itellyou.wav. */
+const std::vector<int16_t> testWav1 = std::vector<int16_t> {
+    -3,0,1,-1,2,3,-2,2,
+    1,-2,0,3,-1,8,3,2,
+    -1,-1,2,7,3,5,6,6,
+    6,12,5,6,3,3,5,4,
+    4,6,7,7,7,3,7,2,
+    8,4,4,2,-4,-1,-1,-4,
+    2,1,-1,-4,0,-7,-6,-2,
+    -5,1,-5,-1,-7,-3,-3,-7,
+    0,-3,3,-5,0,1,-2,-2,
+    -3,-3,-7,-3,-2,-6,-5,-8,
+    -2,-8,4,-9,-4,-9,-5,-5,
+    -3,-9,-3,-9,-1,-7,-4,1,
+    -3,2,-8,-4,-4,-5,1,-3,
+    -1,0,-1,-2,-3,-2,-4,-1,
+    1,-1,3,0,3,2,0,0,
+    0,-3,1,1,0,8,3,4,
+    1,5,6,4,7,3,3,0,
+    3,6,7,6,4,5,9,9,
+    5,5,8,1,6,9,6,6,
+    7,1,8,1,5,0,5,5,
+    0,3,2,7,2,-3,3,0,
+    3,0,0,0,2,0,-1,-1,
+    -2,-3,-8,0,1,0,-3,-3,
+    -3,-2,-3,-3,-4,-6,-2,-8,
+    -9,-4,-1,-5,-3,-3,-4,-3,
+    -6,3,0,-1,-2,-9,-4,-2,
+    2,-1,3,-5,-5,-2,0,-2,
+    0,-1,-3,1,-2,9,4,5,
+    2,2,1,0,-6,-2,0,0,
+    0,-1,4,-4,3,-7,-1,5,
+    -6,-1,-5,4,3,9,-2,1,
+    3,0,0,-2,1,2,1,1,
+    0,3,2,-1,3,-3,7,0,
+    0,3,2,2,-2,3,-2,2,
+    -3,4,-1,-1,-5,-1,-3,-2,
+    1,-1,3,2,4,1,2,-2,
+    0,2,7,0,8,-3,6,-3,
+    6,1,2,-3,-1,-1,-1,1,
+    -2,2,1,2,0,-2,3,-2,
+    3,-2,1,0,-3,-1,-2,-4,
+    -6,-5,-8,-1,-4,0,-3,-1,
+    -1,-1,0,-2,-3,-7,-1,0,
+    1,5,0,5,1,1,-3,0,
+    -6,3,-8,4,-8,6,-6,1,
+    -6,-2,-5,-6,0,-5,4,-1,
+    4,-2,1,2,1,0,-2,0,
+    0,2,-2,2,-5,2,0,-2,
+    1,-2,0,5,1,0,1,5,
+    0,8,3,2,2,0,5,-2,
+    3,1,0,1,0,-2,-1,-3,
+    1,-1,3,0,3,0,-2,-1,
+    -4,-4,-4,-1,-4,-4,-3,-6,
+    -3,-7,-3,-1,-2,0,-5,-4,
+    -7,-3,-2,-2,1,2,2,8,
+    5,4,2,4,3,5,0,3,
+    3,6,4,2,2,-2,4,-2,
+    3,3,2,1,1,4,-5,2,
+    -3,0,-1,1,-2,2,5,1,
+    4,2,3,1,-1,1,0,6,
+    0,-2,-1,1,-1,2,-5,-1,
+    -5,-1,-6,-3,-3,2,4,0,
+    -1,-5,3,-4,-1,-3,-4,1,
+    -4,1,-1,-1,0,-5,-4,-2,
+    -1,-1,-3,-7,-3,-3,4,4,
+};
+
+const std::vector<int16_t> testWav2 = std::vector<int16_t> (512, 0);
+
+/* Golden mfcc output for testwav1. */
+const std::vector<float> golden_mfcc_output_testWav1 {
+    -835.24603, 21.010452, 18.699404, 7.4338417, 19.028961, -5.401735, 6.4761047, -11.400679,
+    8.392709, 12.202361, 8.403276, -13.508412, -18.307348
+};
+
+/* Golden mfcc output for the all zero wav. */
+const std::vector<float> golden_mfcc_output_testWav2 {
+    -1131.37085, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+
+arm::app::audio::Wav2LetterMFCC GetMFCCInstance()
+{
+    const auto sampFreq = arm::app::audio::Wav2LetterMFCC::ms_defaultSamplingFreq;
+    const auto frameLenMs = 32;
+    const auto numMfccFeats = 13;
+    const auto frameLenSamples = sampFreq * frameLenMs * 0.001;
+    return arm::app::audio::Wav2LetterMFCC(numMfccFeats, frameLenSamples);
+}
+
+template <class T>
+void TestQuantisedMFCC()
+{
+    const auto quantScale = 0.1410219967365265;
+    const auto quantOffset = 11;
+    std::vector<T> mfccOutput = GetMFCCInstance().MfccComputeQuant<T>(testWav1, quantScale, quantOffset);
+
+    long min_val = std::numeric_limits<T>::min();
+    long max_val = std::numeric_limits<T>::max();
+
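+    /* Quantise the golden MFCC values as round(value / scale) + offset, clamped to the type's range, with a 2 LSB margin. */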
+    for (size_t i = 0; i < golden_mfcc_output_testWav1.size(); i++){
+        long TestWavMfcc = (std::lround((golden_mfcc_output_testWav1[i] / quantScale) + quantOffset));
+        T quantizedTestWavMfcc = static_cast<T>(std::max(min_val, std::min(TestWavMfcc, max_val)));
+
+        REQUIRE(quantizedTestWavMfcc  == Approx(mfccOutput[i]).margin(2));
+    }
+}
+
+template void TestQuantisedMFCC<int8_t>();
+template void TestQuantisedMFCC<uint8_t>();
+template void TestQuantisedMFCC<int16_t>();
+
+TEST_CASE("MFCC calculation")
+{
+    hal_platform    platform;
+    data_acq_module dataAcq;
+    data_psn_module dataPsn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &dataAcq, &dataPsn, &timer);
+    hal_platform_init(&platform);
+
+    SECTION("FP32")
+    {
+        auto mfccOutput = GetMFCCInstance().MfccCompute(testWav1);
+        REQUIRE_THAT( mfccOutput, Catch::Approx( golden_mfcc_output_testWav1 ).margin(0.3) );
+
+        auto mfccOutput2 = GetMFCCInstance().MfccCompute(testWav2);
+        REQUIRE_THAT( mfccOutput2, Catch::Approx( golden_mfcc_output_testWav2 ).margin(0.001) );
+    }
+
+    SECTION("int8_t")
+    {
+        TestQuantisedMFCC<int8_t>();
+    }
+
+    SECTION("uint8_t")
+    {
+        TestQuantisedMFCC<uint8_t>();
+    }
+
+    SECTION("int16_t")
+    {
+        TestQuantisedMFCC<int16_t>();
+    }
+}
diff --git a/tests/use_case/asr/OutputDecodeTests.cc b/tests/use_case/asr/OutputDecodeTests.cc
new file mode 100644
index 0000000..22153f3
--- /dev/null
+++ b/tests/use_case/asr/OutputDecodeTests.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "OutputDecode.hpp"
+
+#include "catch.hpp"
+
+TEST_CASE("Running output decode on test vector") {
+
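+    /* From the cases below: consecutive duplicate labels are collapsed, while the '$' label separates genuine double characters and is dropped from the decoded string. */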
+    std::vector<arm::app::ClassificationResult> vecResult(20);
+    /* Number of test inputs. */
+    const size_t numStrings = 8; 
+    
+    /* The test inputs. */
+    std::string testText[numStrings][20] 
+    {
+        {"a", "b", "c", "d", "e", "f", "g", "g", "g", " ", "h", "h", "i", " ", " ", "j", "k", "\'", "\'", "l"},  /* initial */
+        {" ", "b", "c", "d", "e", "f", "g", "g", "g", " ", "h", "h", "i", " ", " ", "j", "k", "\'", "\'", " "},  /* space start and end */
+        {"\'", "b", "c", "d", "e", "f", "g", "g", "g", " ", "h", "h", "i", " ", " ", "j", "k", "\'", "l", "\'"}, /* apostrophe start and end */
+        {"a", "a", "c", "d", "e", "f", "g", "g", "g", " ", "h", "h", "i", " ", " ", "j", "k", "\'", "l", "l"},   /* Double start and end */
+        {"a", "b", "c", "d", "e", "f", "g", "g", "o", "$", "o", "h", "i", " ", " ", "j", "k", "\'", "\'", "l"},  /* Legit double character */
+        {"a", "$", "a", "d", "e", "f", "g", "g", "o", "$", "o", "h", "i", " ", " ", "j", "k", "l", "$", "l"},    /* Legit double character start and end */
+        {"$", "a", "b", "d", "e", "f", "g", "g", "o", "$", "o", "h", "i", " ", " ", "j", "k", "l", "$", "$"},    /* $$ */
+        {"$", "a", "b", "d", "e", "f", "g", "g", "o", "$", "o", "h", "i", " ", " ", "j", "k", "l", "l", "l"}
+    };
+
+    /* The golden outputs for the above test inputs. */
+    std::string expectedOutput[numStrings] =
+    {
+        {"abcdefg hi jk\'l"},
+        {" bcdefg hi jk\' "},
+        {"\'bcdefg hi jk\'l\'"},
+        {"acdefg hi jk\'l"},
+        {"abcdefgoohi jk\'l"},
+        {"aadefgoohi jkll"},
+        {"abdefgoohi jkl"},
+        {"abdefgoohi jkl"}
+    };
+
+    /* For each test input. */
+    for (size_t h = 0; h < numStrings; ++h)
+    {
+        /* Generate fake vecResults.m_label to mimic AsrClassifier output containing the testText. */
+        for (size_t i = 0; i < 20; i++)
+        {
+            vecResult[i].m_label = testText[h][i];
+        }
+        /* Call function with fake vecResults and save returned string into 'buff'. */
+        std::string buff = arm::app::audio::asr::DecodeOutput(vecResult); 
+
+        /* Check that the string returned from the function matches the expected output given above. */
+        REQUIRE(buff.compare(expectedOutput[h]) == 0); 
+    }
+}
\ No newline at end of file
diff --git a/tests/use_case/asr/Wav2LetterPostprocessingTest.cc b/tests/use_case/asr/Wav2LetterPostprocessingTest.cc
new file mode 100644
index 0000000..9ed2e1b
--- /dev/null
+++ b/tests/use_case/asr/Wav2LetterPostprocessingTest.cc
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPostprocess.hpp"
+#include "Wav2LetterModel.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+template <typename T>
+static TfLiteTensor GetTestTensor(
+                        std::vector <int>&      shape,
+                        T                       initVal,
+                        std::vector<T>&         vectorBuf)
+{
+    REQUIRE(0 != shape.size());
+
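+    /* IntArrayFromInts treats the first element as the number of dimensions, so prepend the rank to the shape. */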
+    shape.insert(shape.begin(), shape.size());
+    uint32_t sizeInBytes = sizeof(T);
+    for (size_t i = 1; i < shape.size(); ++i) {
+        sizeInBytes *= shape[i];
+    }
+
+    /* Allocate mem. */
+    vectorBuf = std::vector<T>(sizeInBytes, initVal);
+    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(shape.data());
+    return tflite::testing::CreateQuantizedTensor(
+                                vectorBuf.data(), dims,
+                                1, 0, "test-tensor");
+}
+
+TEST_CASE("Checking return value")
+{
+    SECTION("Mismatched post processing parameters and tensor size")
+    {
+        const uint32_t ctxLen = 5;
+        const uint32_t innerLen = 3;
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, 0};
+
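+        /* With ctxLen = 5 and innerLen = 3 the post-processing expects 2*ctxLen + innerLen = 13 rows;
+         * this shape places the 13 on the wrong axis, so Invoke is expected to fail. */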
+        std::vector <int> tensorShape = {1, 1, 1, 13};
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+        REQUIRE(false == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+    }
+
+    SECTION("Post processing succeeds")
+    {
+        const uint32_t ctxLen = 5;
+        const uint32_t innerLen = 3;
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, 0};
+
+        std::vector <int> tensorShape = {1, 1, 13, 1};
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* Invoke should succeed for a matching tensor shape; only the return value is checked here. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+    }
+}
+
+
+TEST_CASE("Postprocessing - erasing required elements")
+{
+    constexpr uint32_t ctxLen = 5;
+    constexpr uint32_t innerLen = 3;
+    constexpr uint32_t nRows = 2*ctxLen + innerLen;
+    constexpr uint32_t nCols = 10;
+    constexpr uint32_t blankTokenIdx = nCols - 1;
+    std::vector <int> tensorShape = {1, 1, nRows, nCols};
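+
+    /* Rows are laid out as [left context | inner | right context]; erased context rows are
+     * expected to be zeroed, with the element at the blank token index set to 1. */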
+
+    SECTION("First and last iteration")
+    {
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This step should not erase anything. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, true));
+        REQUIRE(originalVec == tensorVec);
+    }
+
+    SECTION("Right context erase")
+    {
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This step should erase the right context only. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+        REQUIRE(originalVec != tensorVec);
+
+        /* The last ctxLen * nCols elements should be erased. */
+        for (size_t i = 0; i < ctxLen; ++i) {
+            for (size_t j = 0; j < nCols; ++j) {
+                /* Check right context elements are zeroed. */
+                if (j == blankTokenIdx) {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 1);
+                } else {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 0);
+                }
+
+                /* Check left context is preserved. */
+                CHECK(tensorVec[i*nCols + j] == originalVec[i*nCols + j]);
+            }
+        }
+
+        /* Check inner elements are preserved. */
+        for (size_t i = ctxLen * nCols; i < (ctxLen + innerLen) * nCols; ++i) {
+            CHECK(tensorVec[i] == originalVec[i]);
+        }
+    }
+
+    SECTION("Left and right context erase")
+    {
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This step should erase right context. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+
+        /* Calling it the second time should erase the left context. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+
+        REQUIRE(originalVec != tensorVec);
+
+        /* The first and last ctxLen * nCols elements should be erased. */
+        for (size_t i = 0; i < ctxLen; ++i) {
+            for (size_t j = 0; j < nCols; ++j) {
+                /* Check left and right context elements are zeroed. */
+                if (j == blankTokenIdx) {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 1);
+                    CHECK(tensorVec[i*nCols + j] == 1);
+                } else {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 0);
+                    CHECK(tensorVec[i*nCols + j] == 0);
+                }
+            }
+        }
+
+        /* Check inner elements are preserved. */
+        for (size_t i = ctxLen * nCols; i < (ctxLen + innerLen) * nCols; ++i) {
+            /* Inner elements should match the original values. */
+            CHECK(tensorVec[i] == originalVec[i]);
+        }
+    }
+
+    SECTION("Try left context erase")
+    {
+        /* Should not be able to erase the left context if it is the first iteration. */
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* A single call marked as the last iteration should leave the tensor untouched. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, true));
+
+        REQUIRE(originalVec == tensorVec);
+    }
+}
diff --git a/tests/use_case/asr/Wav2LetterPreprocessingTest.cc b/tests/use_case/asr/Wav2LetterPreprocessingTest.cc
new file mode 100644
index 0000000..1391011
--- /dev/null
+++ b/tests/use_case/asr/Wav2LetterPreprocessingTest.cc
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPreprocess.hpp"
+
+#include <limits>
+#include <algorithm>
+#include <catch.hpp>
+
+constexpr uint32_t numMfccFeatures = 13;
+constexpr uint32_t numMfccVectors  = 10;
+
+/* Test vector output: generated using test-asr-preprocessing.py. */
+int8_t expectedResult[numMfccVectors][numMfccFeatures * 3] = {
+    /* Feature vec 0. */
+    -32,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,    /* MFCCs.   */
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,    /* Delta 1. */
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,    /* Delta 2. */
+
+    /* Feature vec 1. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 2. */
+    -31,   4,  -9,  -9, -10, -10, -11, -11, -11, -11, -12, -12, -12,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 3. */
+    -31,   4,  -9,  -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 4 : this should have valid delta 1 and delta 2. */
+    -31,   4,  -9,  -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+    -38, -29,  -9,   1,  -2,  -7,  -8,  -8, -12, -16, -14,  -5,   5,
+    -68, -50, -13,   5,   0,  -9,  -9,  -8, -13, -20, -19,  -3,  15,
+
+    /* Feature vec 5 : this should have valid delta 1 and delta 2. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+    -62, -45, -11,   5,   0,  -8,  -9,  -8, -12, -19, -17,  -3,  13,
+    -27, -22, -13,  -9, -11, -12, -12, -11, -11, -13, -13, -10,  -6,
+
+    /* Feature vec 6. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 7. */
+    -32,   4,  -9,  -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 8. */
+    -32,   4,  -9,  -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 9. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10
+};
+
+void PopulateTestWavVector(std::vector<int16_t>& vec)
+{
+    constexpr int int16max = std::numeric_limits<int16_t>::max();
+    int val = 0;
+    for (size_t i = 0; i < vec.size(); ++i, ++val) {
+
+        /* We want a differential filter response from both order 1 and
+         * order 2, so the signal must not be linear; here we use the
+         * squares of an increasing ramp. Alternating sign flips might
+         * work just as well and would be computationally cheaper! */
+        int valsq = val * val;
+        if (valsq > int16max) {
+            val = 0;
+            valsq = 0;
+        }
+        vec[i] = valsq;
+    }
+}
+
+TEST_CASE("Preprocessing calculation INT8")
+{
+    /* Initialise the HAL and platform. */
+    hal_platform    platform;
+    data_acq_module data_acq;
+    data_psn_module data_psn;
+    platform_timer  timer;
+    hal_init(&platform, &data_acq, &data_psn, &timer);
+    hal_platform_init(&platform);
+
+    /* Constants. */
+    const uint32_t  windowLen       = 512;
+    const uint32_t  windowStride    = 160;
+    const int       dimArray[]      = {3, 1, numMfccFeatures * 3, numMfccVectors};
+    const float     quantScale      = 0.1410219967365265;
+    const int       quantOffset     = -11;
+
+    /* Test wav memory. */
+    std::vector <int16_t> testWav((windowStride * numMfccVectors) +
+                                  (windowLen - windowStride));
+
+    /* Populate with dummy input. */
+    PopulateTestWavVector(testWav);
+
+    /* Allocate mem for tensor. */
+    std::vector<int8_t> tensorVec(dimArray[1]*dimArray[2]*dimArray[3]);
+
+    /* Initialise dimensions and the test tensor. */
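+    /* Note: the leading 3 in dimArray is the rank consumed by IntArrayFromInts; the resulting
+     * tensor shape is {1, numMfccFeatures * 3, numMfccVectors}. */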
+    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dimArray);
+    TfLiteTensor tensor = tflite::testing::CreateQuantizedTensor(
+        tensorVec.data(), dims, quantScale, quantOffset, "preprocessedInput");
+
+    /* Initialise pre-processing module. */
+    arm::app::audio::asr::Preprocess prep{
+        numMfccFeatures, windowLen, windowStride, numMfccVectors};
+
+    /* Invoke pre-processing. */
+    REQUIRE(prep.Invoke(testWav.data(), testWav.size(), &tensor));
+
+    /* Wrap the tensor with a std::vector for ease. */
+    int8_t * tensorData = tflite::GetTensorData<int8_t>(&tensor);
+    std::vector <int8_t> vecResults =
+        std::vector<int8_t>(tensorData, tensorData + tensor.bytes);
+
+    /* Check sizes. */
+    REQUIRE(vecResults.size() == sizeof(expectedResult));
+
+    /* Check that the elements have been calculated correctly. */
+    for (uint32_t j = 0; j < numMfccVectors; ++j) {
+        for (uint32_t i = 0; i < numMfccFeatures * 3; ++i) {
+            size_t tensorIdx = (j * numMfccFeatures * 3) + i;
+            CHECK(vecResults[tensorIdx] == expectedResult[j][i]);
+        }
+    }
+}
diff --git a/tests/use_case/img_class/ImgClassTests.cc b/tests/use_case/img_class/ImgClassTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/img_class/ImgClassTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/img_class/ImgClassificationUCTest.cc b/tests/use_case/img_class/ImgClassificationUCTest.cc
new file mode 100644
index 0000000..abfcc44
--- /dev/null
+++ b/tests/use_case/img_class/ImgClassificationUCTest.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ClassificationResult.hpp"
+#include "Classifier.hpp"
+#include "hal.h"
+#include "Labels.hpp"
+#include "MobileNetModel.hpp"
+#include "UseCaseHandler.hpp"
+#include "UseCaseCommonUtils.hpp"
+
+#include <catch.hpp>
+
+TEST_CASE("Model info")
+{
+    /* Model wrapper object. */
+    arm::app::MobileNetModel model;
+
+    /* Load the model. */
+    REQUIRE(model.Init());
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    caseContext.Set<arm::app::Model&>("model", model);
+
+    REQUIRE(model.ShowModelInfoHandler());
+}
+
+
+TEST_CASE("Inference by index", "[.]")
+{
+    hal_platform    platform;
+    data_acq_module data_acq;
+    data_psn_module data_psn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &data_acq, &data_psn, &timer);
+    hal_platform_init(&platform);
+
+    /* Model wrapper object. */
+    arm::app::MobileNetModel model;    
+
+    /* Load the model. */
+    REQUIRE(model.Init());
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("model", model);
+    caseContext.Set<uint32_t>("imgIndex", 0);
+    arm::app::Classifier classifier;    /* Classifier wrapper object. */
+    caseContext.Set<arm::app::Classifier&>("classifier", classifier);
+
+    std::vector <std::string> labels;
+    GetLabelsVector(labels);
+    caseContext.Set<const std::vector <std::string>&>("labels", labels);
+
+    REQUIRE(arm::app::ClassifyImageHandler(caseContext, 0, false));
+
+    auto results = caseContext.Get<std::vector<arm::app::ClassificationResult>>("results");
+
+    REQUIRE(results[0].m_labelIdx == 282);
+}
+
+
+TEST_CASE("Inference run all images", "[.]")
+{
+    hal_platform    platform;
+    data_acq_module data_acq;
+    data_psn_module data_psn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &data_acq, &data_psn, &timer);
+    hal_platform_init(&platform);
+
+    /* Model wrapper object. */
+    arm::app::MobileNetModel model;
+
+    /* Load the model. */
+    REQUIRE(model.Init());
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("model", model);
+    caseContext.Set<uint32_t>("imgIndex", 0);
+    arm::app::Classifier classifier;    /* classifier wrapper object. */
+    caseContext.Set<arm::app::Classifier&>("classifier", classifier);
+
+    std::vector <std::string> labels;
+    GetLabelsVector(labels);
+    caseContext.Set<const std::vector <std::string>&>("labels", labels);
+
+    REQUIRE(arm::app::ClassifyImageHandler(caseContext, 0, true));
+}
+
+
+TEST_CASE("List all images")
+{
+    hal_platform    platform;
+    data_acq_module data_acq;
+    data_psn_module data_psn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &data_acq, &data_psn, &timer);
+    hal_platform_init(&platform);
+
+    /* Model wrapper object. */
+    arm::app::MobileNetModel model;
+
+    /* Load the model. */
+    REQUIRE(model.Init());
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("model", model);
+
+    REQUIRE(arm::app::ListFilesHandler(caseContext));
+}
\ No newline at end of file
diff --git a/tests/use_case/img_class/InferenceTestMobilenetV2.cc b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
new file mode 100644
index 0000000..698382f
--- /dev/null
+++ b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"
+#include "ImageUtils.hpp"
+#include "MobileNetModel.hpp"
+#include "TensorFlowLiteMicro.hpp"
+#include "TestData_img_class.hpp"
+
+#include <catch.hpp>
+
+
+bool RunInference(arm::app::Model& model, const uint8_t imageData[])
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    const size_t copySz = inputTensor->bytes < IFM_DATA_SIZE ?
+                            inputTensor->bytes :
+                            IFM_DATA_SIZE;
+    memcpy(inputTensor->data.data, imageData, copySz);
+
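+    /* Test images are stored as uint8; for models with signed input the helper below is
+     * expected to shift the copied data into the int8 range. */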
+    if(model.IsDataSigned()){
+        convertImgIoInt8(inputTensor->data.data, copySz);
+    }
+
+    return model.RunInference();
+}
+
+template<typename T>
+void TestInference(int imageIdx, arm::app::Model& model, T tolerance) {
+    auto image = get_ifm_data_array(imageIdx);
+    auto goldenFV = get_ofm_data_array(imageIdx);
+
+    REQUIRE(RunInference(model, image));
+
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+    REQUIRE(outputTensor);
+    REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+    auto tensorData = tflite::GetTensorData<T>(outputTensor);
+    REQUIRE(tensorData);
+
+    for (size_t i = 0; i < outputTensor->bytes; i++) {
+        REQUIRE((int)tensorData[i] == Approx((int)((T)goldenFV[i])).epsilon(tolerance));
+    }
+}
+
+
+TEST_CASE("Running inference with TensorFlow Lite Micro and MobileNeV2 Uint8", "[MobileNetV2]")
+{
+    SECTION("Executing inferences sequentially")
+    {
+        arm::app::MobileNetModel model{};
+
+        REQUIRE_FALSE(model.IsInited());
+        REQUIRE(model.Init());
+        REQUIRE(model.IsInited());
+
+        for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+            TestInference<uint8_t>(i, model, 1);
+        }
+    }
+
+    for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+        DYNAMIC_SECTION("Executing inference with re-init")
+        {
+            arm::app::MobileNetModel model{};
+
+            REQUIRE_FALSE(model.IsInited());
+            REQUIRE(model.Init());
+            REQUIRE(model.IsInited());
+
+            TestInference<uint8_t>(i, model, 1);
+        }
+    }
+}
diff --git a/tests/use_case/kws/InferenceTestDSCNN.cc b/tests/use_case/kws/InferenceTestDSCNN.cc
new file mode 100644
index 0000000..06358a4
--- /dev/null
+++ b/tests/use_case/kws/InferenceTestDSCNN.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnModel.hpp"
+#include "hal.h"
+#include "TestData_kws.hpp"
+#include "TensorFlowLiteMicro.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+bool RunInference(arm::app::Model& model, const int8_t vec[])
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    const size_t copySz = inputTensor->bytes < IFM_DATA_SIZE ?
+                            inputTensor->bytes :
+                            IFM_DATA_SIZE;
+    memcpy(inputTensor->data.data, vec, copySz);
+
+    return model.RunInference();
+}
+
+bool RunInferenceRandom(arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    std::random_device rndDevice;
+    std::mt19937 mersenneGen{rndDevice()};
+    std::uniform_int_distribution<short> dist {-128, 127};
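+    /* Note: std::uniform_int_distribution does not accept char types, hence a short
+     * distribution over the int8_t range [-128, 127]. */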
+
+    auto gen = [&dist, &mersenneGen](){
+                   return dist(mersenneGen);
+               };
+
+    std::vector<int8_t> randomAudio(inputTensor->bytes);
+    std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+
+    REQUIRE(RunInference(model, randomAudio.data()));
+    return true;
+}
+
+template<typename T>
+void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+{
+    REQUIRE(RunInference(model, input_goldenFV));
+
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+    REQUIRE(outputTensor);
+    REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+    auto tensorData = tflite::GetTensorData<T>(outputTensor);
+    REQUIRE(tensorData);
+
+    for (size_t i = 0; i < outputTensor->bytes; i++) {
+        REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+    }
+}
+
+TEST_CASE("Running random inference with TensorFlow Lite Micro and DsCnnModel Int8", "[DS_CNN]")
+{
+    arm::app::DsCnnModel model{};
+
+    REQUIRE_FALSE(model.IsInited());
+    REQUIRE(model.Init());
+    REQUIRE(model.IsInited());
+
+    REQUIRE(RunInferenceRandom(model));
+}
+
+TEST_CASE("Running inference with TensorFlow Lite Micro and DsCnnModel Uint8", "[DS_CNN]")
+{
+    for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+        const int8_t* input_goldenFV = get_ifm_data_array(i);
+        const int8_t* output_goldenFV = get_ofm_data_array(i);
+
+        DYNAMIC_SECTION("Executing inference with re-init")
+        {
+            arm::app::DsCnnModel model{};
+
+            REQUIRE_FALSE(model.IsInited());
+            REQUIRE(model.Init());
+            REQUIRE(model.IsInited());
+
+            TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+
+        }
+    }
+}
diff --git a/tests/use_case/kws/KWSHandlerTest.cc b/tests/use_case/kws/KWSHandlerTest.cc
new file mode 100644
index 0000000..dee2f6f
--- /dev/null
+++ b/tests/use_case/kws/KWSHandlerTest.cc
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <catch.hpp>
+#include "DsCnnModel.hpp"
+#include "hal.h"
+
+#include "KwsResult.hpp"
+#include "Labels.hpp"
+#include "UseCaseHandler.hpp"
+#include "Classifier.hpp"
+#include "UseCaseCommonUtils.hpp"
+
+TEST_CASE("Model info")
+{
+    /* Model wrapper object. */
+    arm::app::DsCnnModel model;
+
+    /* Load the model. */
+    REQUIRE(model.Init());
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    caseContext.Set<arm::app::Model&>("model", model);
+
+    REQUIRE(model.ShowModelInfoHandler());
+}
+
+
+TEST_CASE("Inference by index")
+{
+    hal_platform    platform;
+    data_acq_module data_acq;
+    data_psn_module data_psn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &data_acq, &data_psn, &timer);
+    hal_platform_init(&platform);
+
+    /* Model wrapper object. */
+    arm::app::DsCnnModel model;
+
+    /* Load the model. */
+    REQUIRE(model.Init());
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("model", model);
+    caseContext.Set<int>("frameLength", g_FrameLength);  /* 640 sample length for DSCNN. */
+    caseContext.Set<int>("frameStride", g_FrameStride);  /* 320 sample stride for DSCNN. */
+    caseContext.Set<float>("scoreThreshold", 0.5);       /* Normalised score threshold. */
+
+    arm::app::Classifier classifier;                     /* classifier wrapper object. */
+    caseContext.Set<arm::app::Classifier&>("classifier", classifier);
+
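+    /* Helper: run the KWS handler on the given clip index and check the top label index of
+     * each returned result window against the expected sequence. */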
+    auto checker = [&](uint32_t audioIndex, std::vector<uint32_t> labelIndex)
+    {
+        caseContext.Set<uint32_t>("audioIndex", audioIndex);
+
+        std::vector<std::string> labels;
+        GetLabelsVector(labels);
+        caseContext.Set<const std::vector<std::string> &>("labels", labels);
+
+        REQUIRE(arm::app::ClassifyAudioHandler(caseContext, audioIndex, false));
+        REQUIRE(caseContext.Has("results"));
+
+        auto results = caseContext.Get<std::vector<arm::app::kws::KwsResult>>("results");
+
+        REQUIRE(results.size() == labelIndex.size());
+
+        for (size_t i = 0; i < results.size(); i++ ) {
+            REQUIRE(results[i].m_resultVec.size());
+            REQUIRE(results[i].m_resultVec[0].m_labelIdx == labelIndex[i]);
+        }
+
+    };
+
+    SECTION("Index = 0, short clip down")
+    {
+        /* Result: down. */
+        checker(0, {5});
+    }
+
+    SECTION("Index = 1, long clip right->left->up")
+    {
+        /* Result: right->right->left->up->up. */
+        checker(1, {7, 1, 6, 4, 4});
+    }
+
+    SECTION("Index = 2, short clip yes")
+    {
+        /* Result: yes. */
+        checker(2, {2});
+    }
+
+    SECTION("Index = 3, long clip yes->no->go->stop")
+    {
+        /* Result: yes->go->no->go->go->go->stop. */
+        checker(3, {2, 11, 3, 11, 11, 11, 10});
+    }
+}
+
+
+TEST_CASE("Inference run all clips")
+{
+    hal_platform    platform;
+    data_acq_module data_acq;
+    data_psn_module data_psn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &data_acq, &data_psn, &timer);
+    hal_platform_init(&platform);
+
+    /* Model wrapper object. */
+    arm::app::DsCnnModel model;
+
+    /* Load the model. */
+    REQUIRE(model.Init());
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("model", model);
+    caseContext.Set<uint32_t>("clipIndex", 0);
+    caseContext.Set<int>("frameLength", g_FrameLength);  /* 640 sample length for DSCNN. */
+    caseContext.Set<int>("frameStride", g_FrameStride);  /* 320 sample stride for DSCNN. */
+    caseContext.Set<float>("scoreThreshold", 0.9);       /* Normalised score threshold. */
+    arm::app::Classifier classifier;                     /* classifier wrapper object. */
+    caseContext.Set<arm::app::Classifier&>("classifier", classifier);
+
+    std::vector <std::string> labels;
+    GetLabelsVector(labels);
+    caseContext.Set<const std::vector <std::string>&>("labels", labels);
+    REQUIRE(arm::app::ClassifyAudioHandler(caseContext, 0, true));
+}
+
+
+TEST_CASE("List all audio clips")
+{
+    hal_platform    platform;
+    data_acq_module data_acq;
+    data_psn_module data_psn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &data_acq, &data_psn, &timer);
+    hal_platform_init(&platform);
+
+    /* Model wrapper object. */
+    arm::app::DsCnnModel model;
+
+    /* Load the model. */
+    REQUIRE(model.Init());
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("model", model);
+
+    REQUIRE(arm::app::ListFilesHandler(caseContext));
+}
\ No newline at end of file
diff --git a/tests/use_case/kws/KwsTests.cc b/tests/use_case/kws/KwsTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/kws/KwsTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/kws/MfccTests.cc b/tests/use_case/kws/MfccTests.cc
new file mode 100644
index 0000000..407861f
--- /dev/null
+++ b/tests/use_case/kws/MfccTests.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnMfcc.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+/* First 640 samples from yes.wav. */
+const std::vector<int16_t> testWav = std::vector<int16_t>{
+    139, 143, 164, 163, 157, 156, 151, 148, 172, 171,
+    165, 169, 149, 142, 145, 147, 166, 146, 112, 132,
+    132, 136, 165, 176, 176, 152, 138, 158, 179, 185,
+    183, 148, 121, 130, 167, 204, 163, 132, 165, 184,
+    193, 205, 210, 204, 195, 178, 168, 197, 207, 201,
+    197, 177, 185, 196, 191, 198, 196, 183, 193, 181,
+    157, 170, 167, 159, 164, 152, 146, 167, 180, 171,
+    194, 232, 204, 173, 171, 172, 184, 169, 175, 199,
+    200, 195, 185, 214, 214, 193, 196, 191, 204, 191,
+    172, 187, 183, 192, 203, 172, 182, 228, 232, 205,
+    177, 174, 191, 210, 210, 211, 197, 177, 198, 217,
+    233, 236, 203, 191, 169, 145, 149, 161, 198, 206,
+    176, 137, 142, 181, 200, 215, 201, 188, 166, 162,
+    184, 155, 135, 132, 126, 142, 169, 184, 172, 156,
+    132, 119, 150, 147, 154, 160, 125, 130, 137, 154,
+    161, 168, 195, 182, 160, 134, 138, 146, 130, 120,
+    101, 122, 137, 118, 117, 131, 145, 140, 146, 148,
+    148, 168, 159, 134, 114, 114, 130, 147, 147, 134,
+    125, 98, 107, 127, 99, 79, 84, 107, 117, 114,
+    93, 92, 127, 112, 109, 110, 96, 118, 97, 87,
+    110, 95, 128, 153, 147, 165, 146, 106, 101, 137,
+    139, 96, 73, 90, 91, 51, 69, 102, 100, 103,
+    96, 101, 123, 107, 82, 89, 118, 127, 99, 100,
+    111, 97, 111, 123, 106, 121, 133, 103, 100, 88,
+    85, 111, 114, 125, 102, 91, 97, 84, 139, 157,
+    109, 66, 72, 129, 111, 90, 127, 126, 101, 109,
+    142, 138, 129, 159, 140, 80, 74, 78, 76, 98,
+    68, 42, 106, 143, 112, 102, 115, 114, 82, 75,
+    92, 80, 110, 114, 66, 86, 119, 101, 101, 103,
+    118, 145, 85, 40, 62, 88, 95, 87, 73, 64,
+    86, 71, 71, 105, 80, 73, 96, 92, 85, 90,
+    81, 86, 105, 100, 89, 78, 102, 114, 95, 98,
+    69, 70, 108, 112, 111, 90, 104, 137, 143, 160,
+    145, 121, 98, 86, 91, 87, 115, 123, 109, 99,
+    85, 120, 131, 116, 125, 144, 153, 111, 98, 110,
+    93, 89, 101, 137, 155, 142, 108, 94, 136, 145,
+    129, 129, 122, 109, 90, 76, 81, 110, 119, 96,
+    95, 102, 105, 111, 90, 89, 111, 115, 86, 51,
+    107, 140, 105, 105, 110, 142, 125, 76, 75, 69,
+    65, 52, 61, 69, 55, 42, 47, 58, 37, 35,
+    24, 20, 44, 22, 16, 26, 6, 3, 4, 23,
+    60, 51, 30, 12, 24, 31, -9, -16, -13, 13,
+    19, 9, 37, 55, 70, 36, 23, 57, 45, 33,
+    50, 59, 18, 11, 62, 74, 52, 8, -3, 26,
+    51, 48, -5, -9, 12, -7, -12, -5, 28, 41,
+    -2, -30, -13, 31, 33, -12, -22, -8, -15, -17,
+    2, -6, -25, -27, -24, -8, 4, -9, -52, -47,
+    -9, -32, -45, -5, 41, 15, -32, -14, 2, -1,
+    -10, -30, -32, -25, -21, -17, -14, 8, -4, -13,
+    34, 18, -36, -38, -18, -19, -28, -17, -14, -16,
+    -2, -20, -27, 12, 11, -17, -33, -12, -22, -64,
+    -42, -26, -23, -22, -37, -51, -53, -30, -18, -48,
+    -69, -38, -54, -96, -72, -49, -50, -57, -41, -22,
+    -43, -64, -54, -23, -49, -69, -41, -44, -42, -49,
+    -40, -26, -54, -50, -38, -49, -70, -94, -89, -69,
+    -56, -65, -71, -47, -39, -49, -79, -91, -56, -46,
+    -62, -86, -64, -32, -47, -50, -71, -77, -65, -68,
+    -52, -51, -61, -67, -61, -81, -93, -52, -59, -62,
+    -51, -75, -76, -50, -32, -54, -68, -70, -43, 1,
+    -42, -92, -80, -41, -38, -79, -69, -49, -82, -122,
+    -93, -21, -24, -61, -70, -73, -62, -74, -69, -43,
+    -25, -15, -43, -23, -26, -69, -44, -12, 1, -51,
+    -78, -13, 3, -53, -105, -72, -24, -62, -66, -31,
+    -40, -65, -86, -64, -44, -55, -63, -61, -37, -41,
+};
+
+/* Golden audio ops mfcc output for the above wav. */
+const std::vector<float> testWavMfcc {
+    -22.67135, -0.61615, 2.07233, 0.58137, 1.01655, 0.85816, 0.46039, 0.03393, 1.16511, 0.0072,
+};
+
+arm::app::audio::DsCnnMFCC GetMFCCInstance() {
+    const int sampFreq = arm::app::audio::DsCnnMFCC::ms_defaultSamplingFreq;
+    const int frameLenMs = 40;
+    const int frameLenSamples = sampFreq * frameLenMs * 0.001;
+    const int numMfccFeats = 10;
+
+    return arm::app::audio::DsCnnMFCC(numMfccFeats, frameLenSamples);
+}
+
+template <class T>
+void TestQuantisedMFCC() {
+    const float quantScale = 1.1088106632232666;
+    const int quantOffset = 95;
+    std::vector<T> mfccOutput = GetMFCCInstance().MfccComputeQuant<T>(testWav, quantScale, quantOffset);
+
+    const long min_val = std::numeric_limits<T>::min();
+    const long max_val = std::numeric_limits<T>::max();
+
+    for (size_t i = 0; i < testWavMfcc.size(); ++i){
+        long TestWavMfcc = (std::lround((testWavMfcc[i] / quantScale) + quantOffset));
+        T quantizedTestWavMfcc = static_cast<T>(std::max(min_val, std::min(TestWavMfcc, max_val)));
+
+        REQUIRE(quantizedTestWavMfcc  == Approx(mfccOutput[i]).margin(0));
+    }
+}
+template void TestQuantisedMFCC<int8_t>();
+template void TestQuantisedMFCC<uint8_t>();
+template void TestQuantisedMFCC<int16_t>();
+
+
+TEST_CASE("MFCC calculation test") {
+    hal_platform    platform;
+    data_acq_module dataAcq;
+    data_psn_module dataPsn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &dataAcq, &dataPsn, &timer);
+    hal_platform_init(&platform);
+
+    SECTION("FP32")
+    {
+        auto mfccOutput = GetMFCCInstance().MfccCompute(testWav);
+        REQUIRE_THAT( mfccOutput, Catch::Approx(testWavMfcc).margin(0.0001) );
+    }
+
+    SECTION("int8_t")
+    {
+        TestQuantisedMFCC<int8_t>();
+    }
+
+    SECTION("uint8_t")
+    {
+        TestQuantisedMFCC<uint8_t>();
+    }
+
+    SECTION("MFCC quant calculation test - int16_t")
+    {
+        TestQuantisedMFCC<int16_t>();
+    }
+}
\ No newline at end of file
diff --git a/tests/use_case/kws_asr/InferenceTestDSCNN.cc b/tests/use_case/kws_asr/InferenceTestDSCNN.cc
new file mode 100644
index 0000000..f0e5c02
--- /dev/null
+++ b/tests/use_case/kws_asr/InferenceTestDSCNN.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnModel.hpp"
+#include "hal.h"
+#include "TestData_kws.hpp"
+#include "TensorFlowLiteMicro.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+namespace arm {
+namespace app {
+namespace kws {
+bool RunInference(arm::app::Model& model, const int8_t vec[])
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    const size_t copySz = inputTensor->bytes < IFM_DATA_SIZE ?
+                            inputTensor->bytes :
+                            IFM_DATA_SIZE;
+    memcpy(inputTensor->data.data, vec, copySz);
+
+    return model.RunInference();
+}
+
+bool RunInferenceRandom(arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    std::random_device rndDevice;
+    std::mt19937 mersenneGen{rndDevice()};
+    std::uniform_int_distribution<short> dist {-128, 127};
+
+    auto gen = [&dist, &mersenneGen](){
+                   return dist(mersenneGen);
+               };
+
+    std::vector<int8_t> randomAudio(inputTensor->bytes);
+    std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+
+    REQUIRE(RunInference(model, randomAudio.data()));
+    return true;
+}
+
+template<typename T>
+void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+{
+    REQUIRE(RunInference(model, input_goldenFV));
+
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+    REQUIRE(outputTensor);
+    REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+    auto tensorData = tflite::GetTensorData<T>(outputTensor);
+    REQUIRE(tensorData);
+
+    for (size_t i = 0; i < outputTensor->bytes; i++) {
+        REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+    }
+}
+
+TEST_CASE("Running random inference with Tflu and DsCnnModel Int8", "[DS_CNN]")
+{
+    arm::app::DsCnnModel model{};
+
+    REQUIRE_FALSE(model.IsInited());
+    REQUIRE(model.Init());
+    REQUIRE(model.IsInited());
+
+    REQUIRE(RunInferenceRandom(model));
+}
+
+TEST_CASE("Running inference with Tflu and DsCnnModel Uint8", "[DS_CNN]")
+{
+    for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+        const int8_t* input_goldenFV = get_ifm_data_array(i);
+        const int8_t* output_goldenFV = get_ofm_data_array(i);
+
+        DYNAMIC_SECTION("Executing inference with re-init")
+        {
+            arm::app::DsCnnModel model{};
+
+            REQUIRE_FALSE(model.IsInited());
+            REQUIRE(model.Init());
+            REQUIRE(model.IsInited());
+
+            TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+
+        }
+    }
+}
+
+} //namespace
+} //namespace
+} //namespace
diff --git a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
new file mode 100644
index 0000000..ee63c2f
--- /dev/null
+++ b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "Wav2LetterModel.hpp"
+#include "TestData_asr.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+namespace arm {
+namespace app {
+namespace asr {
+
+bool RunInference(arm::app::Model& model, const int8_t vec[], const size_t copySz)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    memcpy(inputTensor->data.data, vec, copySz);
+
+    return model.RunInference();
+}
+
+bool RunInferenceRandom(arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    std::random_device rndDevice;
+    std::mt19937 mersenneGen{rndDevice()};
+    std::uniform_int_distribution<short> dist {-128, 127};
+
+    auto gen = [&dist, &mersenneGen](){
+                   return dist(mersenneGen);
+               };
+
+    std::vector<int8_t> randomAudio(inputTensor->bytes);
+    std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+
+    REQUIRE(RunInference(model, randomAudio.data(), inputTensor->bytes));
+    return true;
+}
+
+/* Skip this test by default: a Wav2LetterModel taken straight from the ML-zoo (not Vela optimised) will fail here. */
+TEST_CASE("Running random inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+{
+    arm::app::Wav2LetterModel model{};
+
+    REQUIRE_FALSE(model.IsInited());
+    REQUIRE(model.Init());
+    REQUIRE(model.IsInited());
+
+    REQUIRE(RunInferenceRandom(model));
+}
+
+
+template<typename T>
+void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    REQUIRE(RunInference(model, input_goldenFV, inputTensor->bytes));
+
+    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+    REQUIRE(outputTensor);
+    REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+    auto tensorData = tflite::GetTensorData<T>(outputTensor);
+    REQUIRE(tensorData);
+
+    for (size_t i = 0; i < outputTensor->bytes; i++) {
+        REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+    }
+}
+
+TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+{
+    for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+        auto input_goldenFV = get_ifm_data_array(i);
+        auto output_goldenFV = get_ofm_data_array(i);
+
+        DYNAMIC_SECTION("Executing inference with re-init")
+        {
+            arm::app::Wav2LetterModel model{};
+
+            REQUIRE_FALSE(model.IsInited());
+            REQUIRE(model.Init());
+            REQUIRE(model.IsInited());
+
+            TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+
+        }
+    }
+}
+
+} //namespace
+} //namespace
+} //namespace
diff --git a/tests/use_case/kws_asr/InitModels.cc b/tests/use_case/kws_asr/InitModels.cc
new file mode 100644
index 0000000..770944d
--- /dev/null
+++ b/tests/use_case/kws_asr/InitModels.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnModel.hpp"
+#include "Wav2LetterModel.hpp"
+
+#include <catch.hpp>
+
+/* Skip this test by default: a Wav2LetterModel that is not Vela optimised (i.e. straight from the ML-zoo) would fail here. */
+TEST_CASE("Init two Models", "[.]")
+{
+    arm::app::DsCnnModel model1;
+    arm::app::DsCnnModel model2;
+
+    /* Ideally we should load the wav2letter model here, but there is
+     * none available to run on native (ops not supported on unoptimised
+     * version). However, we can certainly create two instances of the
+     * same type of model to see if our tensor arena re-use works as
+     * intended.
+     *
+     * @TODO: uncomment this when this model can run on native pipeline. */
+    //arm::app::Wav2LetterModel model2;     /* model2. */
+
+    /* Load/initialise the first model. */
+    REQUIRE(model1.Init());
+
+    /* Allocator instance should have been created. */
+    REQUIRE(nullptr != model1.GetAllocator());
+
+    /* Load the second model using the same allocator as model 1. */
+    REQUIRE(model2.Init(model1.GetAllocator()));
+
+    /* Make sure they point to the same allocator object. */
+    REQUIRE(model1.GetAllocator() == model2.GetAllocator());
+
+    /* Both models should report being initialised. */
+    REQUIRE(true == model1.IsInited());
+    REQUIRE(true == model2.IsInited());
+}
\ No newline at end of file
diff --git a/tests/use_case/kws_asr/KwsAsrTests.cc b/tests/use_case/kws_asr/KwsAsrTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/kws_asr/KwsAsrTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/kws_asr/MfccTests.cc b/tests/use_case/kws_asr/MfccTests.cc
new file mode 100644
index 0000000..9509519
--- /dev/null
+++ b/tests/use_case/kws_asr/MfccTests.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnMfcc.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+/* First 640 samples from yes.wav. */
+const std::vector<int16_t> testWav = std::vector<int16_t>{
+    139, 143, 164, 163, 157, 156, 151, 148, 172, 171,
+    165, 169, 149, 142, 145, 147, 166, 146, 112, 132,
+    132, 136, 165, 176, 176, 152, 138, 158, 179, 185,
+    183, 148, 121, 130, 167, 204, 163, 132, 165, 184,
+    193, 205, 210, 204, 195, 178, 168, 197, 207, 201,
+    197, 177, 185, 196, 191, 198, 196, 183, 193, 181,
+    157, 170, 167, 159, 164, 152, 146, 167, 180, 171,
+    194, 232, 204, 173, 171, 172, 184, 169, 175, 199,
+    200, 195, 185, 214, 214, 193, 196, 191, 204, 191,
+    172, 187, 183, 192, 203, 172, 182, 228, 232, 205,
+    177, 174, 191, 210, 210, 211, 197, 177, 198, 217,
+    233, 236, 203, 191, 169, 145, 149, 161, 198, 206,
+    176, 137, 142, 181, 200, 215, 201, 188, 166, 162,
+    184, 155, 135, 132, 126, 142, 169, 184, 172, 156,
+    132, 119, 150, 147, 154, 160, 125, 130, 137, 154,
+    161, 168, 195, 182, 160, 134, 138, 146, 130, 120,
+    101, 122, 137, 118, 117, 131, 145, 140, 146, 148,
+    148, 168, 159, 134, 114, 114, 130, 147, 147, 134,
+    125, 98, 107, 127, 99, 79, 84, 107, 117, 114,
+    93, 92, 127, 112, 109, 110, 96, 118, 97, 87,
+    110, 95, 128, 153, 147, 165, 146, 106, 101, 137,
+    139, 96, 73, 90, 91, 51, 69, 102, 100, 103,
+    96, 101, 123, 107, 82, 89, 118, 127, 99, 100,
+    111, 97, 111, 123, 106, 121, 133, 103, 100, 88,
+    85, 111, 114, 125, 102, 91, 97, 84, 139, 157,
+    109, 66, 72, 129, 111, 90, 127, 126, 101, 109,
+    142, 138, 129, 159, 140, 80, 74, 78, 76, 98,
+    68, 42, 106, 143, 112, 102, 115, 114, 82, 75,
+    92, 80, 110, 114, 66, 86, 119, 101, 101, 103,
+    118, 145, 85, 40, 62, 88, 95, 87, 73, 64,
+    86, 71, 71, 105, 80, 73, 96, 92, 85, 90,
+    81, 86, 105, 100, 89, 78, 102, 114, 95, 98,
+    69, 70, 108, 112, 111, 90, 104, 137, 143, 160,
+    145, 121, 98, 86, 91, 87, 115, 123, 109, 99,
+    85, 120, 131, 116, 125, 144, 153, 111, 98, 110,
+    93, 89, 101, 137, 155, 142, 108, 94, 136, 145,
+    129, 129, 122, 109, 90, 76, 81, 110, 119, 96,
+    95, 102, 105, 111, 90, 89, 111, 115, 86, 51,
+    107, 140, 105, 105, 110, 142, 125, 76, 75, 69,
+    65, 52, 61, 69, 55, 42, 47, 58, 37, 35,
+    24, 20, 44, 22, 16, 26, 6, 3, 4, 23,
+    60, 51, 30, 12, 24, 31, -9, -16, -13, 13,
+    19, 9, 37, 55, 70, 36, 23, 57, 45, 33,
+    50, 59, 18, 11, 62, 74, 52, 8, -3, 26,
+    51, 48, -5, -9, 12, -7, -12, -5, 28, 41,
+    -2, -30, -13, 31, 33, -12, -22, -8, -15, -17,
+    2, -6, -25, -27, -24, -8, 4, -9, -52, -47,
+    -9, -32, -45, -5, 41, 15, -32, -14, 2, -1,
+    -10, -30, -32, -25, -21, -17, -14, 8, -4, -13,
+    34, 18, -36, -38, -18, -19, -28, -17, -14, -16,
+    -2, -20, -27, 12, 11, -17, -33, -12, -22, -64,
+    -42, -26, -23, -22, -37, -51, -53, -30, -18, -48,
+    -69, -38, -54, -96, -72, -49, -50, -57, -41, -22,
+    -43, -64, -54, -23, -49, -69, -41, -44, -42, -49,
+    -40, -26, -54, -50, -38, -49, -70, -94, -89, -69,
+    -56, -65, -71, -47, -39, -49, -79, -91, -56, -46,
+    -62, -86, -64, -32, -47, -50, -71, -77, -65, -68,
+    -52, -51, -61, -67, -61, -81, -93, -52, -59, -62,
+    -51, -75, -76, -50, -32, -54, -68, -70, -43, 1,
+    -42, -92, -80, -41, -38, -79, -69, -49, -82, -122,
+    -93, -21, -24, -61, -70, -73, -62, -74, -69, -43,
+    -25, -15, -43, -23, -26, -69, -44, -12, 1, -51,
+    -78, -13, 3, -53, -105, -72, -24, -62, -66, -31,
+    -40, -65, -86, -64, -44, -55, -63, -61, -37, -41,
+};
+
+/* Golden audio ops mfcc output for the above wav. */
+const std::vector<float> testWavMfcc {
+    -22.67135, -0.61615, 2.07233, 0.58137, 1.01655, 0.85816, 0.46039, 0.03393, 1.16511, 0.0072,
+};
+
+arm::app::audio::DsCnnMFCC GetMFCCInstance() {
+    const int sampFreq = arm::app::audio::DsCnnMFCC::ms_defaultSamplingFreq;
+    const int frameLenMs = 40;
+    const int frameLenSamples = sampFreq * frameLenMs * 0.001;
+    const int numMfccFeats = 10;
+
+    return arm::app::audio::DsCnnMFCC(numMfccFeats, frameLenSamples);
+}
+
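+/* Reference for the quantised MFCC checks below: a value x is expected
+ * to quantise to clamp(lround(x / quantScale) + quantOffset, T_min, T_max).
+ * E.g. the first golden MFCC value (-22.67135) maps to -20 + 95 = 75,
+ * which is in range for all three tested types. */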
+template <class T>
+void TestQuantisedMFCC() {
+    const float quantScale = 1.1088106632232666;
+    const int quantOffset = 95;
+    std::vector<T> mfccOutput = GetMFCCInstance().MfccComputeQuant<T>(testWav, quantScale, quantOffset);
+
+    const long minVal = std::numeric_limits<T>::min();
+    const long maxVal = std::numeric_limits<T>::max();
+
+    for (size_t i = 0; i < testWavMfcc.size(); ++i) {
+        const long quantisedExpected = std::lround((testWavMfcc[i] / quantScale) + quantOffset);
+        const T clampedExpected = static_cast<T>(std::max(minVal, std::min(quantisedExpected, maxVal)));
+
+        REQUIRE(clampedExpected == Approx(mfccOutput[i]).margin(0));
+    }
+}
+template void TestQuantisedMFCC<int8_t>();
+template void TestQuantisedMFCC<uint8_t>();
+template void TestQuantisedMFCC<int16_t>();
+
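+/* Computes MFCC features for the test waveform above and compares them
+ * against the golden values: first in FP32, then via the quantised
+ * int8_t, uint8_t and int16_t paths. */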
+TEST_CASE("MFCC calculation test")
+{
+    hal_platform    platform;
+    data_acq_module dataAcq;
+    data_psn_module dataPsn;
+    platform_timer  timer;
+
+    /* Initialise the HAL and platform. */
+    hal_init(&platform, &dataAcq, &dataPsn, &timer);
+    hal_platform_init(&platform);
+
+    SECTION("FP32")
+    {
+        auto mfccOutput = GetMFCCInstance().MfccCompute(testWav);
+        REQUIRE_THAT( mfccOutput, Catch::Approx( testWavMfcc ).margin(0.0001) );
+    }
+
+    SECTION("int8_t")
+    {
+        TestQuantisedMFCC<int8_t>();
+    }
+
+    SECTION("uint8_t")
+    {
+        TestQuantisedMFCC<uint8_t>();
+    }
+
+    SECTION("MFCC quant calculation test - int16_t")
+    {
+        TestQuantisedMFCC<int16_t>();
+    }
+}
\ No newline at end of file
diff --git a/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc b/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc
new file mode 100644
index 0000000..6fd7df3
--- /dev/null
+++ b/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPostprocess.hpp"
+#include "Wav2LetterModel.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
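+/* Helper: wraps a caller-owned buffer in a quantised TfLiteTensor.
+ * The rank is prepended to 'shape' because IntArrayFromInts() expects
+ * the number of dimensions as the first element; the buffer is filled
+ * with 'initVal' and the tensor uses scale 1 and zero point 0 so raw
+ * values can be compared directly. */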
+template <typename T>
+static TfLiteTensor GetTestTensor(std::vector <int>& shape,
+                                  T                  initVal,
+                                  std::vector<T>&    vectorBuf)
+{
+    REQUIRE(0 != shape.size());
+
+    shape.insert(shape.begin(), shape.size());
+    uint32_t sizeInBytes = sizeof(T);
+    for (size_t i = 1; i < shape.size(); ++i) {
+        sizeInBytes *= shape[i];
+    }
+
+    /* Allocate mem. */
+    vectorBuf = std::vector<T>(sizeInBytes, initVal);
+    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(shape.data());
+    return tflite::testing::CreateQuantizedTensor(
+                                vectorBuf.data(), dims,
+                                1, 0, "test-tensor");
+}
+
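+/* Invoke() is expected to fail when the tensor's row count does not
+ * match 2 * ctxLen + innerLen, and to succeed when it does. */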
+TEST_CASE("Checking return value")
+{
+    SECTION("Mismatched post processing parameters and tensor size")
+    {
+        const uint32_t ctxLen = 5;
+        const uint32_t innerLen = 3;
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, 0};
+
+        std::vector <int> tensorShape = {1, 1, 1, 13};
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+        REQUIRE(false == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+    }
+
+    SECTION("Post processing succeeds")
+    {
+        const uint32_t ctxLen = 5;
+        const uint32_t innerLen = 3;
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, 0};
+
+        std::vector <int> tensorShape = {1, 1, 13, 1};
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Invoke should succeed: the tensor dimensions match the
+         * post-processing parameters. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+    }
+}
+
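+/* The tensor used below has nRows = 2 * ctxLen + innerLen rows: a left
+ * context block, an inner block and a right context block. Erasing a
+ * context block should replace each of its rows with a one-hot blank
+ * token (index blankTokenIdx set to 1, every other class 0), while the
+ * inner block must remain untouched. */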
+TEST_CASE("Postprocessing - erasing required elements")
+{
+    constexpr uint32_t ctxLen = 5;
+    constexpr uint32_t innerLen = 3;
+    constexpr uint32_t nRows = 2*ctxLen + innerLen;
+    constexpr uint32_t nCols = 10;
+    constexpr uint32_t blankTokenIdx = nCols - 1;
+    std::vector <int> tensorShape = {1, 1, nRows, nCols};
+
+    SECTION("First and last iteration")
+    {
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This step should not erase anything. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, true));
+        REQUIRE(originalVec == tensorVec);
+    }
+
+    SECTION("Right context erase")
+    {
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This step should erase the right context only. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+        REQUIRE(originalVec != tensorVec);
+
+        /* The last ctxLen * nCols elements should have been erased. */
+        for (size_t i = 0; i < ctxLen; ++i) {
+            for (size_t j = 0; j < nCols; ++j) {
+                /* Right context rows should be reset: blank token set
+                 * to 1, all other classes zeroed. */
+                if (j == blankTokenIdx) {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 1);
+                } else {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 0);
+                }
+
+                /* Check left context is preserved. */
+                CHECK(tensorVec[i*nCols + j] == originalVec[i*nCols + j]);
+            }
+        }
+
+        /* Check inner elements are preserved. */
+        for (size_t i = ctxLen * nCols; i < (ctxLen + innerLen) * nCols; ++i) {
+            CHECK(tensorVec[i] == originalVec[i]);
+        }
+    }
+
+    SECTION("Left and right context erase")
+    {
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* This step should erase right context. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+
+        /* Calling it the second time should erase the left context. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+
+        REQUIRE(originalVec != tensorVec);
+
+        /* Both the first and the last ctxLen * nCols elements should have been erased. */
+        for (size_t i = 0; i < ctxLen; ++i) {
+            for (size_t j = 0; j < nCols; ++j) {
+                /* Left and right context rows should be reset: blank
+                 * token set to 1, all other classes zeroed. */
+                if (j == blankTokenIdx) {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i * nCols + j] == 1);
+                    CHECK(tensorVec[i * nCols + j] == 1);
+                } else {
+                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i * nCols + j] == 0);
+                    CHECK(tensorVec[i * nCols + j] == 0);
+                }
+            }
+        }
+
+        /* Check inner elements are preserved. */
+        for (size_t i = ctxLen * nCols; i < (ctxLen + innerLen) * nCols; ++i) {
+            CHECK(tensorVec[i] == originalVec[i]);
+        }
+    }
+
+    SECTION("Try left context erase")
+    {
+        /* Should not be able to erase the left context if it is the first iteration. */
+        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+        std::vector <int8_t> tensorVec;
+        TfLiteTensor tensor = GetTestTensor<int8_t>(
+                                tensorShape, 100, tensorVec);
+
+        /* Copy elements to compare later. */
+        std::vector <int8_t> originalVec = tensorVec;
+
+        /* As this is both the first and the last iteration, nothing should be erased. */
+        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, true));
+        REQUIRE(originalVec == tensorVec);
+    }
+}
\ No newline at end of file
diff --git a/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc b/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc
new file mode 100644
index 0000000..e71366a
--- /dev/null
+++ b/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPreprocess.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+constexpr uint32_t numMfccFeatures = 13;
+constexpr uint32_t numMfccVectors  = 10;
+
+/* Test vector output: generated using test-asr-preprocessing.py. */
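+/* Each feature vector packs numMfccFeatures MFCC values followed by
+ * their first and second order deltas (3 * numMfccFeatures values in
+ * total), already quantised to int8 with the scale and offset defined
+ * in the test case below. */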
+int8_t expectedResult[numMfccVectors][numMfccFeatures*3] = {
+    /* Feature vec 0. */
+    -32,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,    /* MFCCs.   */
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,    /* Delta 1. */
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,    /* Delta 2. */
+
+    /* Feature vec 1. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 2. */
+    -31,   4,  -9,  -9, -10, -10, -11, -11, -11, -11, -12, -12, -12,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 3. */
+    -31,   4,  -9,  -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 4 : this should have valid delta 1 and delta 2. */
+    -31,   4,  -9,  -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+    -38, -29,  -9,   1,  -2,  -7,  -8,  -8, -12, -16, -14,  -5,   5,
+    -68, -50, -13,   5,   0,  -9,  -9,  -8, -13, -20, -19,  -3,  15,
+
+    /* Feature vec 5 : this should have valid delta 1 and delta 2. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+    -62, -45, -11,   5,   0,  -8,  -9,  -8, -12, -19, -17,  -3,  13,
+    -27, -22, -13,  -9, -11, -12, -12, -11, -11, -13, -13, -10,  -6,
+
+    /* Feature vec 6. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 7. */
+    -32,   4,  -9,  -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 8. */
+    -32,   4,  -9,  -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+    /* Feature vec 9. */
+    -31,   4,  -9,  -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10
+};
+
+void PopulateTestWavVector(std::vector<int16_t>& vec)
+{
+    constexpr int int16max = std::numeric_limits<int16_t>::max();
+    int val = 0;
+    for (size_t i = 0; i < vec.size(); ++i, ++val) {
+
+        /* We want a non-trivial response from both the first and
+         * second order delta filters, so a linear ramp is not enough;
+         * a sequence of squares is used instead. Alternating sign
+         * flips would work just as well and would be cheaper to
+         * compute. */
+        int valsq = val * val;
+        if (valsq > int16max) {
+            val = 0;
+            valsq = 0;
+        }
+        vec[i] = valsq;
+    }
+}
+
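+/* End-to-end pre-processing check: a synthetic waveform is generated,
+ * run through the Wav2Letter pre-processing pipeline (MFCC extraction,
+ * delta computation and int8 quantisation) into a quantised tensor,
+ * and the result is compared element-wise against the golden values
+ * above. */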
+TEST_CASE("Preprocessing calculation INT8")
+{
+    /* Initialise the HAL and platform. */
+    hal_platform    platform;
+    data_acq_module data_acq;
+    data_psn_module data_psn;
+    platform_timer  timer;
+    hal_init(&platform, &data_acq, &data_psn, &timer);
+    hal_platform_init(&platform);
+
+    /* Constants. */
+    const uint32_t  windowLen       = 512;
+    const uint32_t  windowStride    = 160;
+    const int       dimArray[]      = {3, 1, numMfccFeatures * 3, numMfccVectors};
+    const float     quantScale      = 0.1410219967365265;
+    const int       quantOffset     = -11;
+
+    /* Test wav memory. */
+    std::vector <int16_t> testWav((windowStride * numMfccVectors) +
+                                  (windowLen - windowStride));
+
+    /* Populate with dummy input. */
+    PopulateTestWavVector(testWav);
+
+    /* Allocate mem for tensor. */
+    std::vector<int8_t> tensorVec(dimArray[1]*dimArray[2]*dimArray[3]);
+
+    /* Initialise dimensions and the test tensor. */
+    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dimArray);
+    TfLiteTensor tensor = tflite::testing::CreateQuantizedTensor(
+        tensorVec.data(), dims, quantScale, quantOffset, "preprocessedInput");
+
+    /* Initialise pre-processing module. */
+    arm::app::audio::asr::Preprocess prep{
+        numMfccFeatures, windowLen, windowStride, numMfccVectors};
+
+    /* Invoke pre-processing. */
+    REQUIRE(prep.Invoke(testWav.data(), testWav.size(), &tensor));
+
+    /* Wrap the tensor with a std::vector for ease. */
+    int8_t * tensorData = tflite::GetTensorData<int8_t>(&tensor);
+    std::vector <int8_t> vecResults =
+        std::vector<int8_t>(tensorData, tensorData + tensor.bytes);
+
+    /* Check sizes. */
+    REQUIRE(vecResults.size() == sizeof(expectedResult));
+
+    /* Check that the elements have been calculated correctly. */
+    for (uint32_t j = 0; j < numMfccVectors; ++j) {
+        for (uint32_t i = 0; i < numMfccFeatures * 3; ++i) {
+            size_t tensorIdx = (j * numMfccFeatures * 3) + i;
+            CHECK(vecResults[tensorIdx] == expectedResult[j][i]);
+        }
+    }
+}
diff --git a/tests/utils/ImageUtils.cc b/tests/utils/ImageUtils.cc
new file mode 100644
index 0000000..f77ce1e
--- /dev/null
+++ b/tests/utils/ImageUtils.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ImageUtils.hpp"
+
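+/* Converts an image buffer in place from uint8 [0, 255] to int8
+ * [-128, 127] by subtracting 128 from each value. */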
+void convertImgIoInt8(void * data, const size_t sz)
+{
+    uint8_t * tmp_req_data          = (uint8_t *)data;
+    int8_t * tmp_signed_req_data    = (int8_t *) data;
+
+    for (size_t i = 0; i < sz; ++i) {
+        tmp_signed_req_data[i] = (int8_t)(
+                (int32_t)(tmp_req_data[i]) - 128);
+    }
+}
+
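+/* Converts packed RGB data to greyscale using the ITU-R BT.601 luma
+ * weights (0.2989, 0.587, 0.114); e.g. a pure red pixel (255, 0, 0)
+ * maps to a grey value of roughly 76. */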
+void convertImgIoGreyscale(const uint8_t * srcPtr, uint8_t * dstPtr, const size_t sz)
+{
+    for (size_t i = 0; i < sz; ++i, srcPtr += 3) {
+        *dstPtr++ = 0.2989 * (*srcPtr) +
+                    0.587 * (*(srcPtr+1)) +
+                    0.114 * (*(srcPtr+2));
+    }
+}
\ No newline at end of file
diff --git a/tests/utils/ImageUtils.hpp b/tests/utils/ImageUtils.hpp
new file mode 100644
index 0000000..838dcef
--- /dev/null
+++ b/tests/utils/ImageUtils.hpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef IMAGEUTILS_HPP
+#define IMAGEUTILS_HPP
+
+#include <catch.hpp>
+
+void convertImgIoInt8(void * data, const size_t sz);
+
+void convertImgIoGreyscale(const uint8_t * srcPtr, uint8_t * dstPtr, const size_t sz);
+
+#endif /* IMAGEUTILS_HPP */