Opensource ML embedded evaluation kit

Change-Id: I12e807f19f5cacad7cef82572b6dd48252fd61fd
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ba6dc28
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,17 @@
+# IDE related
+.vscode
+.idea
+
+# Downloaded files
+
+# Build files
+CMakeFiles
+build
+cmake-build-*
+
+# Virtual environments
+scripts/py/env*
+scripts/py/venv*
+env
+venv
+__pycache__*
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..06532f6
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,12 @@
+[submodule "dependencies/tensorflow"]
+	path = dependencies/tensorflow
+	url = https://github.com/tensorflow/tensorflow
+[submodule "dependencies/cmsis"]
+	path = dependencies/cmsis
+	url = https://github.com/ARM-software/CMSIS_5.git
+[submodule "dependencies/core_driver"]
+	path = dependencies/core_driver
+	url = https://review.mlplatform.org/ml/ethos-u/ethos-u-core-driver
+[submodule "dependencies/core-software"]
+	path = dependencies/core-software
+	url = https://review.mlplatform.org/ml/ethos-u/ethos-u-core-software
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..e2f109c
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,435 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+# Minimum CMake version 3.15 is required because
+# armclang support does not work in previous releases
+cmake_minimum_required(VERSION 3.15.0)
+
+# Build in release mode by default
+if (NOT CMAKE_BUILD_TYPE STREQUAL Debug)
+    set(CMAKE_BUILD_TYPE Release CACHE INTERNAL "")
+endif()
+
+message(STATUS "Build type is set to ${CMAKE_BUILD_TYPE}")
+
+# Set language standards. TensorFlow Lite requires
+# std=c++11.
+set(CMAKE_C_STANDARD   99)
+set(CMAKE_CXX_STANDARD 11)
+
+# Make the standard a requirement => prevent fallback to previous
+# supported standard
+set(CMAKE_C_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+# We want to pass standard C/C++ flags, without gnu extensions
+set(CMAKE_C_EXTENSIONS OFF)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+project(arm_ethos_u55_eval
+        VERSION     21.03
+        DESCRIPTION "ARM Ethos-U55 Evaluation application for MPS3 FPGA Prototyping Board and FastModel")
+
+add_compile_definitions(PRJ_VER_STR="${PROJECT_VERSION}")
+add_compile_definitions(PRJ_DES_STR="${PROJECT_DESCRIPTION}")
+
+set(CMAKE_SCRIPTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/)
+set(DOWNLOAD_DEP_DIR  ${CMAKE_BINARY_DIR}/dependencies)
+
+include(${CMAKE_SCRIPTS_DIR}/source_gen_utils.cmake)
+include(${CMAKE_SCRIPTS_DIR}/util_functions.cmake)
+
+if (${CMAKE_BINARY_DIR} STREQUAL ${CMAKE_SOURCE_DIR})
+    message(FATAL_ERROR "Source and build are in the same directory")
+else()
+    message(STATUS "Source directory: ${CMAKE_SOURCE_DIR}")
+    message(STATUS "Binary directory: ${CMAKE_BINARY_DIR}")
+endif()
+
+USER_OPTION(LOG_LEVEL "Log level for the application"
+    LOG_LEVEL_INFO
+    STRING)
+
+USER_OPTION(TENSORFLOW_SRC_PATH "Path to the root of the TensorFlow source directory"
+    "${CMAKE_CURRENT_SOURCE_DIR}/dependencies/tensorflow"
+    PATH)
+
+USER_OPTION(TARGET_PLATFORM "Target platform to execute evaluation application: mps3, simple_platform, native"
+    mps3
+    STRING)
+
+USER_OPTION(TARGET_SUBSYSTEM "Specify platform target subsystem: sse-200, sse-300 or none"
+    sse-300
+    STRING)
+
+USER_OPTION(ETHOS_U55_ENABLED "Select if Ethos-U55 is available for the platform and subsystem"
+    ON
+    BOOL)
+
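+# Example (illustrative): from a build directory, `cmake .. -DUSE_CASE_BUILD=img_class`
+# builds only the img_class use-case; a semicolon-separated list selects several.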
+USER_OPTION(USE_CASE_BUILD "Optional. Defines the use-case to build from the available sources. By default, all use-cases are built."
+    all
+    STRING)
+
+USER_OPTION(CPU_PROFILE_ENABLED "Output CPU performance profiling information. Should be used only for MPS3 board."
+    OFF
+    BOOL)
+
+if (TARGET_PLATFORM STREQUAL mps3)
+    message(STATUS "Platform: MPS3 FPGA Prototyping Board or SSE-XXX FVP")
+elseif (TARGET_PLATFORM STREQUAL simple_platform)
+    message(STATUS "Platform: Simple platform within minimal peripherals")
+elseif (TARGET_PLATFORM STREQUAL native)
+    message(STATUS "Platform: Native (Linux based x86_64/aarch64 system)")
+else ()
+    message(FATAL_ERROR "Invalid platform specified: ${TARGET_PLATFORM}")
+endif ()
+
+enforce_compiler_version()
+setup_source_generator()
+
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+set(SRC_PATH ${CMAKE_CURRENT_SOURCE_DIR}/source)
+
+if (CPU_PROFILE_ENABLED)
+    set(PROFILING_OPT "${PROFILING_OPT} -DCPU_PROFILE_ENABLED")
+endif()
+
+# Include platform specific sources
+if (TARGET_PLATFORM STREQUAL native)
+    set(PLATFORM_SOURCES_CMAKE_FILE ${CMAKE_SCRIPTS_DIR}/${TARGET_PLATFORM}-sources.cmake)
+else ()
+    set(PLATFORM_SOURCES_CMAKE_FILE ${CMAKE_SCRIPTS_DIR}/bare-metal-sources.cmake)
+
+    USER_OPTION(CMSIS_SRC_PATH
+        "Path to CMSIS-5 sources"
+        "${CMAKE_CURRENT_SOURCE_DIR}/dependencies/cmsis"
+        PATH
+        )
+
+    if (CMAKE_BUILD_TYPE STREQUAL Debug AND CMAKE_CXX_COMPILER_ID STREQUAL ARMClang)
+        USER_OPTION(ARMCLANG_DEBUG_DWARF_LEVEL
+            "Dwarf conformance level for armclang toolchain"
+            "4" # Default = 4 (Arm-DS etc). For model debugger specify "3"
+            STRING
+            )
+    elseif (DEFINED ARMCLANG_DEBUG_DWARF_LEVEL)
+        message(WARNING "ARMCLANG_DEBUG_DWARF_LEVEL definition is unsupported"
+                        "within current configuration. Removing definition...")
+        unset(ARMCLANG_DEBUG_DWARF_LEVEL CACHE)
+    endif()
+
+endif ()
+message(STATUS "Including ${PLATFORM_SOURCES_CMAKE_FILE}")
+include(${PLATFORM_SOURCES_CMAKE_FILE})
+
+if (${CMAKE_CROSSCOMPILING})
+    enable_language(ASM)
+
+    # For non-native builds, we build with CMSIS-DSP support.
+    include(${CMAKE_SCRIPTS_DIR}/cmsis-dsp.cmake)
+
+    # All CMSIS headers to be used:
+    set(CMSIS_HEADERS
+        ${CMSIS_DSP_INC_DIR}
+        ${CMSIS_CORE_INC_DIR}
+        ${CMSIS_SRC_PATH}/Device/ARM/ARMCM55/Include
+        ${CMSIS_SRC_PATH}/Device/ARM/ARMCM55/Include/Template)
+endif ()
+
+# If we need NPU libraries:
+if (ETHOS_U55_ENABLED)
+
+    message(STATUS "Using ARM Ethos-U55 - adding core-driver and timing-adapter-driver includes and libraries")
+    USER_OPTION(ETHOS_U55_TIMING_ADAPTER_SRC_PATH
+        "Path to Ethos-U55 timing adapter sources"
+        "${CMAKE_CURRENT_SOURCE_DIR}/dependencies/core-software/drivers/timing_adapter"
+        PATH
+        )
+
+    USER_OPTION(ETHOS_U55_DRIVER_SRC_PATH
+        "Path to Ethos-U55 core driver sources"
+        "${CMAKE_CURRENT_SOURCE_DIR}/dependencies/core_driver"
+        PATH
+        )
+
+    include_directories("${ETHOS_U55_TIMING_ADAPTER_SRC_PATH}/include/")
+
+    add_subdirectory("${ETHOS_U55_TIMING_ADAPTER_SRC_PATH}" ${CMAKE_BINARY_DIR}/timing-adapter)
+
+    set(ETHOSU_INCLUDES ${ETHOS_U55_TIMING_ADAPTER_SRC_PATH}/include
+                        ${ETHOS_U55_DRIVER_SRC_PATH}/include)
+
+    list(APPEND ETHOS_U55_LIBS ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libtiming_adapter.a)
+endif ()
+
+include(${CMAKE_SCRIPTS_DIR}/tensorflow.cmake)
+
+set(DEP_TENSORFLOW_LITE_MICRO_SUB_DIR ${TENSORFLOW_SRC_PATH}/tensorflow/lite/micro)
+set(DEP_TENSORFLOW_LITE_MICRO_MAKE_DIR ${DEP_TENSORFLOW_LITE_MICRO_SUB_DIR}/tools/make/targets)
+set(DEP_FLATBUF_INCLUDE ${DEP_TENSORFLOW_LITE_MICRO_SUB_DIR}/tools/make/downloads/flatbuffers/include)
+
+set(TENSORFLOW_LIBRARY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME})
+
+set(DEP_TF_INCLUDE_DIRS
+    ${TENSORFLOW_SRC_PATH}
+    ${DEP_TENSORFLOW_LITE_MICRO_SUB_DIR}
+    ${ETHOSU_INCLUDES}
+    ${CMSIS_HEADERS}
+    )
+
+## All TPIP includes
+set(DEP_RUNTIME_INCLUDE_DIRS
+    ${DEP_TF_INCLUDE_DIRS}
+    ${DEP_FLATBUF_INCLUDE}
+    )
+
+# Our entry point into tensorflow world:
+file(GLOB_RECURSE SRC_TENSORFLOW_LITE_MICRO
+    ${SRC_PATH}/application/tensorflow-lite-micro/**/*.cc
+    ${SRC_PATH}/application/tensorflow-lite-micro/*.cc
+    )
+
+set(HAL_DIR ${SRC_PATH}/application/hal)
+
+# HAL API sources
+file(GLOB_RECURSE SRC_HAL
+    "${HAL_DIR}/hal.c"
+    )
+
+# Set platform specific HAL sources; these should be provided
+# by each platform's cmake include file
+list(APPEND SRC_HAL ${SRC_PLAT_HAL})
+
+# Include directories:
+set(APPLICATION_INCLUDE_DIRS
+    ${HAL_DIR}/include
+    ${SRC_PATH}/application/tensorflow-lite-micro/include
+    ${SRC_PATH}/application/main/include
+    ${PLAT_INCLUDE_DIRS}
+    )
+
+file(GLOB_RECURSE SRC_APPLICATION
+    "${SRC_PATH}/application/main/*.cc"
+    "${SRC_PATH}/application/main/*.cpp"
+    "${SRC_PATH}/application/main/*.c"
+    "${SRC_PATH}/application/main/**/*.cc"
+    "${SRC_PATH}/application/main/**/*.cpp"
+    "${SRC_PATH}/application/main/**/*.c"
+    )
+list(FILTER SRC_APPLICATION EXCLUDE REGEX ".*main\\.c.*$")
+
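+# USE_CASE_BUILD may be a semicolon-separated CMake list; join it into a plain
+# string so it can be compared against the special value "all".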
+list(JOIN USE_CASE_BUILD "" USE_CASE_BUILD_STR)
+if (${USE_CASE_BUILD_STR} STREQUAL all)
+    SUBDIRLIST(USE_CASES ${SRC_PATH}/use_case/)
+else()
+    set(USE_CASES ${USE_CASE_BUILD})
+endif()
+
+set(TEST_SRCS  ${CMAKE_CURRENT_SOURCE_DIR}/tests)
+if (NOT ${CMAKE_CROSSCOMPILING})
+
+    #Test TPIP
+    set(TEST_TPIP ${DOWNLOAD_DEP_DIR}/test)
+    file(MAKE_DIRECTORY ${TEST_TPIP})
+    set(TEST_TPIP_INCLUDE ${TEST_TPIP}/include)
+    file(MAKE_DIRECTORY ${TEST_TPIP_INCLUDE})
+
+    include(ExternalProject)
+
+    ExternalProject_Add(catch2-headers
+        URL https://github.com/catchorg/Catch2/releases/download/v2.11.1/catch.hpp
+        DOWNLOAD_NO_EXTRACT 1
+        CONFIGURE_COMMAND ""
+        BUILD_COMMAND bash -c "cp -R <DOWNLOAD_DIR>/catch.hpp ${TEST_TPIP_INCLUDE}"
+        INSTALL_COMMAND ""
+        )
+endif ()
+
+message(STATUS "Building use-cases: ${USE_CASES}.")
+foreach(use_case ${USE_CASES})
+
+    if (EXISTS ${SRC_PATH}/use_case/${use_case})
+        message(STATUS "Found sources for use-case ${use_case}")
+    else ()
+        message(FATAL_ERROR "Faild to find sources for ${use_case} in ${SRC_PATH}/use_case/${use_case}!")
+    endif ()
+    # Executable application:
+    set(TARGET_NAME "ethos-u-${use_case}")
+
+    set(DEFAULT_MODEL_DIR   ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/models)
+    set(SRC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/src)
+    set(INC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/include)
+
+    # Remove old files and recreate dirs
+    file(REMOVE_RECURSE ${SRC_GEN_DIR} ${INC_GEN_DIR})
+    file(MAKE_DIRECTORY ${SRC_GEN_DIR} ${INC_GEN_DIR})
+
+    file(GLOB_RECURSE UC_SRC
+        "${SRC_PATH}/use_case/${use_case}/src/*.cpp"
+        "${SRC_PATH}/use_case/${use_case}/src/*.cc"
+        "${SRC_PATH}/use_case/${use_case}/src/*.c"
+        "${SRC_PATH}/use_case/${use_case}/src/**/*.cpp"
+        "${SRC_PATH}/use_case/${use_case}/src/**/*.cc"
+        "${SRC_PATH}/use_case/${use_case}/src/**/*.c"
+        )
+
+    set(UC_INCLUDE
+        ${SRC_PATH}/use_case/${use_case}/include
+        )
+
+    file(GLOB UC_CMAKE_FILE
+        "${SRC_PATH}/use_case/${use_case}/*.cmake"
+        )
+
+    include(${UC_CMAKE_FILE})
+
+    file(GLOB_RECURSE SRC_GEN
+        "${SRC_GEN_DIR}/*.cc"
+        "${SRC_GEN_DIR}/*.cpp"
+        "${SRC_GEN_DIR}/*.c"
+        )
+
+    set(SRC_MAIN
+        "${SRC_PATH}/application/main/Main.cc"
+        )
+
+    set(UC_LIB_NAME lib${TARGET_NAME})
+
+    # Consolidated application static lib:
+    add_library(${UC_LIB_NAME} STATIC
+        ${SRC_APPLICATION}
+        ${SRC_TENSORFLOW_LITE_MICRO}
+        ${SRC_HAL}
+        ${UC_SRC}
+        ${SRC_GEN}
+        )
+    target_include_directories(${UC_LIB_NAME} PUBLIC
+        ${APPLICATION_INCLUDE_DIRS}
+        ${DEP_RUNTIME_INCLUDE_DIRS}
+        ${UC_INCLUDE}
+        ${INC_GEN_DIR}
+        )
+
+    # Set the activation buffer size
+    target_compile_definitions(${UC_LIB_NAME} PUBLIC
+            "ACTIVATION_BUF_SZ=${${use_case}_ACTIVATION_BUF_SZ}")
+
+    add_dependencies(${UC_LIB_NAME} tensorflow-lite-micro)
+
+    if (${CMAKE_CROSSCOMPILING})
+        # If we are building timing adapter, set the dependency:
+        if (ETHOS_U55_ENABLED)
+            message(STATUS "Adding timing_adapter as a dependency to ${UC_LIB_NAME}")
+            add_dependencies(${UC_LIB_NAME} timing_adapter)
+        endif()
+
+        # If building with CMSIS-DSP support:
+        if (DEFINED CMSIS_DSP_TARGET)
+            message(STATUS "Adding ${CMSIS_DSP_TARGET} as a dependency to ${UC_LIB_NAME}")
+            add_dependencies(${UC_LIB_NAME} ${CMSIS_DSP_TARGET})
+        endif()
+    endif()
+
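+    # Link the NPU and CMSIS-DSP libraries only when the corresponding
+    # options/targets have been enabled above: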
+    target_link_libraries(${UC_LIB_NAME} PUBLIC
+        ${TENSORFLOW_LIBRARY}
+        $<$<BOOL:${ETHOS_U55_ENABLED}>:${ETHOS_U55_LIBS}>
+        $<$<BOOL:${CMSIS_DSP_LIB}>:${CMSIS_DSP_LIB}>)
+
+    add_executable(${TARGET_NAME} ${SRC_MAIN})
+
+    target_link_libraries(${TARGET_NAME} ${UC_LIB_NAME})
+
+    if (${CMAKE_CROSSCOMPILING})
+        set_target_properties(${TARGET_NAME} PROPERTIES SUFFIX ".axf")
+    endif()
+
+    if (${TARGET_PLATFORM} STREQUAL mps3)
+
+        SET(SECTORS_DIR ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/sectors/${use_case})
+        file(REMOVE_RECURSE ${SECTORS_DIR})
+        file(MAKE_DIRECTORY ${SECTORS_DIR})
+
+        add_custom_command(TARGET ${TARGET_NAME}
+            POST_BUILD
+            COMMAND fromelf --bin --output=${SECTORS_DIR}/
+            ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TARGET_NAME}.axf)
+
+        add_custom_target(
+            run-${use_case} ALL
+            COMMAND ${PYTHON} ${CMAKE_CURRENT_SOURCE_DIR}/scripts/py/gen_fpga_mem_map.py
+            --scatter_file_path ${SCAT_FILE}
+            --target_subsystem  ${TARGET_SUBSYSTEM}
+            --output_file_path  ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/images-${use_case}.txt
+            COMMENT "Generating FPGA mappings file")
+    elseif (${TARGET_PLATFORM} STREQUAL native)
+        # Add tests only if they exist for the use-case
+        if (EXISTS ${TEST_SRCS}/use_case/${use_case})
+
+            set(TEST_RESOURCES_INCLUDE
+                "${TEST_SRCS}/utils/"
+                "${TEST_SRCS}/resources/golden_fv/"
+                )
+
+            # Define Test sources and new target to run unit tests
+            file(GLOB_RECURSE TEST_SOURCES
+                "${TEST_SRCS}/common/*.cpp"
+                "${TEST_SRCS}/common/*.cc"
+                "${TEST_SRCS}/utils/*.cc"
+                "${TEST_SRCS}/utils/*.cpp"
+                "${TEST_SRCS}/use_case/${use_case}/*.cpp"
+                "${TEST_SRCS}/use_case/${use_case}/*.cc"
+                "${TEST_SRCS}/use_case/${use_case}/*.c"
+                "${TEST_SRCS}/use_case/${use_case}/**/*.cpp"
+                "${TEST_SRCS}/use_case/${use_case}/**/*.cc"
+                "${TEST_SRCS}/use_case/${use_case}/**/*.c"
+                )
+
+            if (DEFINED ${use_case}_TEST_IFM AND DEFINED ${use_case}_TEST_OFM)
+                message(STATUS  "Test vectors are available for ${${use_case}_MODEL_TFLITE_PATH} "
+                                "Input: ${${use_case}_TEST_IFM} "
+                                "Output: ${${use_case}_TEST_OFM}")
+
+                set(TEST_SRC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/src)
+                set(TEST_INC_GEN_DIR ${CMAKE_BINARY_DIR}/generated/${use_case}/tests/include)
+                file(GLOB_RECURSE TEST_SOURCES_GEN
+                    "${TEST_SRC_GEN_DIR}/*.cc"
+                    "${TEST_SRC_GEN_DIR}/**/*.cc"
+                    )
+                message(STATUS "Adding ${TEST_SOURCES_GEN} to test sources")
+                list(APPEND TEST_SOURCES ${TEST_SOURCES_GEN})
+                list(APPEND TEST_RESOURCES_INCLUDE ${TEST_INC_GEN_DIR})
+            endif()
+
+            set(TEST_TARGET_NAME "${CMAKE_PROJECT_NAME}-${use_case}-tests")
+            add_executable(${TEST_TARGET_NAME} ${TEST_SOURCES})
+            target_include_directories(${TEST_TARGET_NAME} PUBLIC
+                ${TEST_TPIP_INCLUDE} ${TEST_RESOURCES_INCLUDE})
+            target_link_libraries(${TEST_TARGET_NAME} libethos-u-${use_case})
+            target_compile_definitions(${TEST_TARGET_NAME} PRIVATE
+                "ACTIVATION_BUF_SZ=${${use_case}_ACTIVATION_BUF_SZ}"
+                TESTS)
+
+            add_dependencies(
+                "${TEST_TARGET_NAME}"
+                "catch2-headers"
+            )
+        endif ()
+    endif ()
+endforeach()
+
+print_useroptions()
diff --git a/LICENSE_APACHE_2.0.txt b/LICENSE_APACHE_2.0.txt
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/LICENSE_APACHE_2.0.txt
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/Readme.md b/Readme.md
new file mode 100644
index 0000000..db62515
--- /dev/null
+++ b/Readme.md
@@ -0,0 +1,89 @@
+# Arm® ML embedded evaluation kit
+
+This repository is for building and deploying Machine Learning (ML) applications targeted at the Arm® Cortex®-M CPUs
+and the Arm® Ethos™-U NPU.
+To run evaluations using this software, we suggest using an [MPS3 board](https://developer.arm.com/tools-and-software/development-boards/fpga-prototyping-boards/mps3)
+or a Fixed Virtual Platform (FVP) that supports the Ethos-U55 software Fast Model. Both environments run a combination of
+the new [Arm® Cortex®-M55 processor](https://www.arm.com/products/silicon-ip-cpu/cortex-m/cortex-m55) and the
+[Arm® Ethos™-U55 NPU](https://www.arm.com/products/silicon-ip-cpu/ethos/ethos-u55).
+
+## Overview of the evaluation kit
+
+The purpose of the evaluation kit is to allow the user to develop software and test the performance of the Ethos-U55 NPU and
+Cortex-M55 CPU. The Ethos-U55 NPU is a new class of machine learning (ML) processor, specifically designed to accelerate
+computation for ML workloads in constrained embedded and IoT devices. The product is optimized to efficiently execute the
+mathematical operations commonly used in ML algorithms, such as convolutions or activation functions.
+
+## ML use cases
+
+The evaluation kit adds value by providing ready-to-use ML applications for the embedded stack. As a result, you can
+experiment with the already developed software use cases and create your own applications for the Cortex-M CPU and Ethos-U NPU.
+The example applications at your disposal and the models they use are listed in the table below.
+
+|   ML application                     |  Description | Neural Network Model |
+| :----------------------------------: | :-----------------------------------------------------: | :----: |
+|  [Image classification](./docs/use_cases/img_class.md)              | Recognize the presence of objects in a given image | [Mobilenet V2](https://github.com/ARM-software/ML-zoo/blob/master/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8)   |
+|  [Keyword spotting (KWS)](./docs/use_cases/kws.md)             | Recognize the presence of a keyword in a recording | [DS-CNN-L](https://github.com/ARM-software/ML-zoo/blob/master/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8) |
+|  [Automated Speech Recognition (ASR)](./docs/use_cases/asr.md) | Transcribe words in a recording | [Wav2Letter](https://github.com/ARM-software/ML-zoo/blob/master/models/speech_recognition/wav2letter/tflite_int8) |
+|  [KWS and ASR](./docs/use_cases/kws_asr.md) | Utilise Cortex-M and Ethos-U to transcribe words in a recording after a keyword was spotted | [DS-CNN-L](https://github.com/ARM-software/ML-zoo/blob/master/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8)  [Wav2Letter](https://github.com/ARM-software/ML-zoo/blob/master/models/speech_recognition/wav2letter/tflite_int8) |
+|  [Anomaly Detection](./docs/use_cases/ad.md)                 | Detect abnormal behavior based on a sound recording of a machine | Coming soon |
+| [Generic inference runner](./docs/use_cases/inference_runner.md) | Code block allowing you to develop your own use case for Ethos-U55 NPU | Your custom model |
+
+The above use cases implement an end-to-end ML flow, including data pre-processing and post-processing. They allow you
+to investigate the embedded software stack and evaluate the performance of the networks running on the Cortex-M55 CPU and
+Ethos-U55 NPU by displaying performance metrics such as inference cycle count estimates and the results of the network execution.
+
+## Software and hardware overview
+
+The evaluation kit is based on the [Arm® Corstone™-300 reference package](https://developer.arm.com/ip-products/subsystem/corstone/corstone-300).
+Arm® Corstone™-300 helps you build SoCs quickly on the Arm® Cortex™-M55 and Arm® Ethos™-U55 designs. Arm® Corstone™-300 design
+implementation is publicly available on an [Arm MPS3 FPGA board](https://developer.arm.com/tools-and-software/development-boards/fpga-prototyping-boards/download-fpga-images),
+or as a [Fixed Virtual Platform of the MPS3 development board](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+The Ethos-U NPU software stack is described [here](https://developer.arm.com/documentation/101888/0500/NPU-software-overview/NPU-software-components?lang=en).
+
+All ML use cases, although each illustrates a different application, share common code, such as initializing the Hardware
+Abstraction Layer (HAL). Thanks to the HAL, the application common code can run on the x86 or Arm Cortex-M architecture.
+For the ML application-specific part, the Google® TensorFlow™ Lite for Microcontrollers inference engine is used to schedule
+the execution of neural network models. TensorFlow Lite for Microcontrollers is integrated with the [Ethos-U55 driver](https://git.mlplatform.org/ml/ethos-u/ethos-u-core-driver.git)
+and delegates execution of certain operators to the NPU or, if the neural network model operators are not supported on the
+NPU, to the CPU. [CMSIS-NN](https://github.com/ARM-software/CMSIS_5) is used to optimise CPU workload execution
+with the int8 data type.
+The common ML application functions help you focus on implementing the logic of your custom ML use case: you can modify
+only the use case code and leave all other components unchanged. The supplied build system discovers new ML application
+code and automatically includes it in the compilation flow.
+
+![APIs](./docs/media/APIs_description.png)
+
+To run an ML application on the Cortex-M CPU and Ethos-U55 NPU, follow these steps:
+
+1. Set up your environment by installing [the required prerequisites](./docs/sections/building.md#Build-prerequisites).
+2. Generate an optimized neural network model for Ethos-U with the Vela compiler by following the instructions [here](./docs/sections/building.md#Add-custom-model).
+3. [Configure the build system](./docs/sections/building.md#Build-process).
+4. [Compile the project](./docs/sections/building.md#Building-the-configured-project) with a `make` command.
+5. If using an FVP, [launch the desired application on the FVP](./docs/sections/deployment.md#Fixed-Virtual-Platform).
+If using the FPGA option, load the image onto the FPGA and [launch the application](./docs/sections/deployment.md#MPS3-board).
+
+To get familiar with these steps, you can follow the [quick start guide](docs/quick_start.md).
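+
+As a minimal sketch of steps 3 and 4 (the `kws` use case is just an example, and you
+may also need to point CMake at a suitable toolchain file, as described in the build
+documentation):
+
+```commandline
+mkdir build && cd build
+cmake .. -DUSE_CASE_BUILD=kws
+make -j4
+```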
+
+For more details:
+
+- [Arm Ethos-U55 NPU Code Samples](docs/documentation.md)
+  - [Trademarks](docs/documentation.md#Trademarks)
+  - [Prerequisites](docs/documentation.md#Prerequisites)
+    - [Additional reading](docs/documentation.md#additional-reading)
+  - [Repository structure](docs/documentation.md#repository-structure)
+  - [Building](docs/documentation.md#building)
+  - [Deployment](docs/documentation.md#deployment)
+  - [Running sample applications](docs/documentation.md#running-code-sample-applications)
+  - [Implementing custom ML application](docs/documentation.md#implementing-custom-ML-application)
+  - [Testing and benchmarking](docs/documentation.md#testing-and-benchmarking)
+  - [Troubleshooting](docs/documentation.md#troubleshooting)
+  - [Appendix](docs/documentation.md#appendix)
+  - [Contribution guidelines](docs/documentation.md#contribution-guidelines)
+    - [Coding standards and guidelines](docs/documentation.md#coding-standards-and-guidelines)
+    - [Code Reviews](docs/documentation.md#code-reviews)
+    - [Testing](docs/documentation.md#testing)
+  - [Communications](docs/documentation.md#communications)
+  - [Licenses](docs/documentation.md#licenses)
+
diff --git a/dependencies/cmsis b/dependencies/cmsis
new file mode 160000
index 0000000..0d7e4fa
--- /dev/null
+++ b/dependencies/cmsis
@@ -0,0 +1 @@
+Subproject commit 0d7e4fa7131241a17e23dfae18140e0b2e77728f
diff --git a/dependencies/core-software b/dependencies/core-software
new file mode 160000
index 0000000..3a0d3f2
--- /dev/null
+++ b/dependencies/core-software
@@ -0,0 +1 @@
+Subproject commit 3a0d3f286be62b4933ba404187aff23cae166a5a
diff --git a/dependencies/core_driver b/dependencies/core_driver
new file mode 160000
index 0000000..8565d75
--- /dev/null
+++ b/dependencies/core_driver
@@ -0,0 +1 @@
+Subproject commit 8565d75b96a2f57f559f12dc0c68438bcfd276c8
diff --git a/dependencies/tensorflow b/dependencies/tensorflow
new file mode 160000
index 0000000..6cff09a
--- /dev/null
+++ b/dependencies/tensorflow
@@ -0,0 +1 @@
+Subproject commit 6cff09aee1f832d495b3cae40cab0de58155a0af
diff --git a/docs/documentation.md b/docs/documentation.md
new file mode 100644
index 0000000..655ef27
--- /dev/null
+++ b/docs/documentation.md
@@ -0,0 +1,390 @@
+# Arm Ethos-U55 NPU Code Samples
+
+## Table of Contents
+
+- [Arm Ethos-U55 NPU Code Samples](./documentation.md#arm-ethos-u55-npu-code-samples)
+  - [Table of Contents](./documentation.md#table-of-contents)
+  - [Trademarks](./documentation.md#trademarks)
+  - [Prerequisites](./documentation.md#prerequisites)
+    - [Additional reading](./documentation.md#additional-reading)
+  - [Repository structure](./documentation.md#repository-structure)
+  - [Models and resources](./documentation.md#models-and-resources)
+  - [Building](./documentation.md#building)
+  - [Deployment](./documentation.md#deployment)
+  - [Running code sample applications](./documentation.md#running-code-sample-applications)
+  - [Implementing custom ML application](./documentation.md#implementing-custom-ml-application)
+  - [Testing and benchmarking](./documentation.md#testing-and-benchmarking)
+  - [Troubleshooting](./documentation.md#troubleshooting)
+  - [Appendix](./documentation.md#appendix)
+  - [Contribution guidelines](./documentation.md#contribution-guidelines)
+    - [Coding standards and guidelines](./documentation.md#coding-standards-and-guidelines)
+    - [Code Reviews](./documentation.md#code-reviews)
+    - [Testing](./documentation.md#testing)
+  - [Licenses](./documentation.md#licenses)
+
+## Trademarks
+
+- Arm® and Cortex® are registered trademarks of Arm® Limited (or its subsidiaries) in the US and/or elsewhere.
+- Arm® and Ethos™ are registered trademarks or trademarks of Arm® Limited (or its subsidiaries) in the US and/or elsewhere.
+- Arm® and Corstone™ are registered trademarks or trademarks of Arm® Limited (or its subsidiaries) in the US and/or elsewhere.
+- TensorFlow™, the TensorFlow logo and any related marks are trademarks of Google Inc.
+
+## Prerequisites
+
+Before starting the setup process, please make sure that you have:
+
+- A Linux x86_64 based machine or Windows Subsystem for Linux is
+    preferable. Windows can be used as a build environment but cannot
+    run Fast Model simulations.
+
+- Arm Compiler license (version 6.14 or above).
+
+  - [Arm Compiler Download
+        Page](https://developer.arm.com/tools-and-software/embedded/arm-compiler/downloads/)
+
+- An Arm® MPS3 FPGA prototyping board and components for FPGA evaluation or a `Fixed Virtual Platform` binary:
+  - An MPS3 board loaded with the Arm® Corstone™-300 reference package (`AN547`) from:
+    <https://developer.arm.com/tools-and-software/development-boards/fpga-prototyping-boards/download-fpga-images>.
+    You also need a USB connection between your machine and the MPS3 board, both for the UART menu and for
+    deploying the application.
+  - `Arm Corstone-300` based FVP for MPS3 is available from: <https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps>.
+
+### Additional reading
+
+This document contains information that is specific to Arm® Ethos™-U55 products.
+See the following documents for other relevant information:
+
+- ML platform overview: <https://mlplatform.org/>
+
+- Arm® ML processors technical overview: <https://developer.arm.com/ip-products/processors/machine-learning>
+
+- Arm® Cortex®-M55 processor: <https://www.arm.com/products/silicon-ip-cpu/cortex-m/cortex-m55>
+
+- ML processor, also referred to as a Neural Processing Unit (NPU) - Arm® Ethos™-U55:
+    <https://www.arm.com/products/silicon-ip-cpu/ethos/ethos-u55>
+
+- Arm® MPS3 FPGA Prototyping Board:
+    <https://developer.arm.com/tools-and-software/development-boards/fpga-prototyping-boards/mps3>
+
+- Arm® ML-Zoo: <https://github.com/ARM-software/ML-zoo/>
+
+See <http://developer.arm.com> for access to Arm documentation.
+
+## Repository structure
+
+The repository has the following structure:
+
+```tree
+.
+├── dependencies
+├── docs
+├── scripts
+│   └── ...
+├── source
+│   ├── application
+│   │ ├── hal
+│   │ ├── main
+│   │ └── tensorflow-lite-micro
+│   └── use_case
+│     └── <usecase_name>
+│          ├── include
+│          ├── src
+│          └── usecase.cmake
+├── tests
+│   └── ...
+└── CMakeLists.txt
+```
+
+Where:
+
+- `dependencies`: contains all the third party dependencies for this project.
+
+- `docs`: contains the documentation for these ML applications.
+
+- `scripts`: contains build related and source generation scripts.
+
+- `source`: contains C/C++ sources for the platform and ML applications.
+    Common code related to the Ethos-U55 NPU software
+    framework resides in the *application* sub-folder with the following
+    structure:
+
+  - `application`: contains all the sources that form the *core* of the application.
+    The `use case` part of the sources depends on sources here.
+
+    - `hal`: contains hardware abstraction layer sources providing a
+        platform agnostic API to access hardware platform specific functions.
+
+    - `main`: contains the main function and calls to platform initialization
+          logic to set things up before launching the main loop.
+          It also contains sources common to all use case implementations.
+
+    - `tensorflow-lite-micro`: contains abstraction around TensorFlow Lite Micro API
+          implementing common functions to initialize a neural network model, run an inference, and
+          access inference results.
+
+  - `use_case`: contains the ML use-case specific logic. Having this as a separate sub-folder isolates the ML-specific
+    application logic, with the assumption that the `application` code will do all the required set-up for the logic here to run.
+    It also makes it easier to add a new use case block.
+
+- `tests`: contains the x86 tests for the use case applications.
+
+The hardware abstraction layer has the following structure:
+
+```tree
+hal
+├── hal.c
+├── include
+│   └── ...
+└── platforms
+    ├── bare-metal
+    │   ├── bsp
+    │   │   ├── bsp-core
+    │   │   │   └── include
+    │   │   ├── bsp-packs
+    │   │   │   └── mps3
+    │   │   ├── cmsis-device
+    │   │   ├── include
+    │   │   └── mem_layout
+    │   ├── data_acquisition
+    │   ├── data_presentation
+    │   │   ├── data_psn.c
+    │   │   └── lcd
+    │   │       └── include
+    │   ├── images
+    │   ├── timer
+    │   └── utils
+    └── native
+```
+
+- `include` and `hal.c`: contains the hardware abstraction layer (HAL) top-level platform API and the data acquisition, data
+presentation and timer interfaces.
+    > Note: the files here and lower in the hierarchy have been written in
+    C and this layer is a clean C/C++ boundary in the sources.
+
+- `platforms/bare-metal/data_acquisition`\
+`platforms/bare-metal/data_presentation`\
+`platforms/bare-metal/timer`\
+`platforms/bare-metal/utils`: contains the bare-metal HAL support layer and platform initialisation helpers. Function calls
+  are routed to platform-specific logic at this level. For example, for data presentation, an `lcd` module has been used.
+  This wraps the LCD driver calls for the actual hardware (for example, MPS3).
+
+- `platforms/bare-metal/bsp/bsp-packs`: contains the core low-level drivers (written in C) for the platform.
+  For the supplied examples this happens to be an MPS3 board, but support could be added here for other platforms too.
+  The functions defined in this space are wired to the higher-level functions under the HAL (such as those at the `platforms/bare-metal/` level).
+
+- `platforms/bare-metal/bsp/bsp-packs/mps3/include`\
+`platforms/bare-metal/bsp/bsp-packs/mps3`: contains the peripheral (LCD, UART and timer) drivers specific to MPS3 board.
+
+- `platforms/bare-metal/bsp/bsp-core`\
+`platforms/bare-metal/bsp/include`: contains the BSP core sources common to all BSPs. These include a UART header
+  (only the implementation of this is platform specific, but the API is common) and "re-targeting" of the standard output
+  and error streams to the UART block.
+
+- `platforms/bare-metal/bsp/cmsis-device`: contains the CMSIS template implementation for the CPU and also device
+  initialisation routines. It is also where the system interrupts are set up and handlers are overridden.
+  The main entry point of a bare-metal application will most likely reside in this space. This entry point is
+  responsible for the set-up before calling the user-defined "main" function in the higher-level `application` logic.
+
+- `platforms/bare-metal/bsp/mem_layout`: contains the platform specific linker scripts.
+
+## Models and resources
+
+The models used in the use cases implemented in this project can be downloaded
+from [Arm ML-Zoo](https://github.com/ARM-software/ML-zoo/).
+
+- [Mobilenet V2](https://github.com/ARM-software/ML-zoo/blob/master/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8).
+- [DS-CNN](https://github.com/ARM-software/ML-zoo/blob/master/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8).
+- [Wav2Letter](https://github.com/ARM-software/ML-zoo/blob/master/models/speech_recognition/wav2letter/tflite_int8).
+- Anomaly Detection (coming soon).
+
+When using the Ethos-U55 backend, the NN model is assumed to have been optimized by the Vela compiler.
+However, if it has not been, execution will fall back to the CPU, provided the operators are supported by TensorFlow Lite Micro.
+
+![Vela compiler](./media/vela_flow.jpg)
+
+The Vela compiler is a tool that can optimize a neural network model
+into a version that can run on an embedded system containing Ethos-U55.
+
+The optimized model will contain custom operators for the sub-graphs of the
+model that can be accelerated by the Ethos-U55. The remaining layers that
+cannot be accelerated are left unchanged and will run on the CPU using
+optimized (CMSIS-NN) or reference kernels provided by the inference
+engine.
+
+For detailed information see [Optimize model with Vela compiler](./sections/building.md#Optimize-custom-model-with-Vela-compiler).
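+
+For instance, an optimization run might look like this (a sketch, assuming the
+`ethos-u-vela` Python package is installed; the exact flags are described in the Vela
+documentation). By default Vela writes the optimized model, with a `_vela.tflite`
+suffix, into an `output` sub-directory:
+
+```commandline
+pip install ethos-u-vela
+vela ds_cnn_clustered_int8.tflite --accelerator-config=ethos-u55-128
+```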
+
+## Building
+
+This section describes how to build the code sample applications from sources, illustrating the build
+options and the process.
+
+The project can be built for the MPS3 FPGA and for an FVP emulating MPS3. The default values of the configuration
+parameters will build executables with Ethos-U55 support.
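+
+For example, a minimal configuration for the default target might look like this (a
+sketch; the options shown are this project's CMake parameters with their default values
+made explicit, and a suitable toolchain must be available as described below):
+
+```commandline
+mkdir -p build && cd build
+cmake .. -DTARGET_PLATFORM=mps3 -DTARGET_SUBSYSTEM=sse-300 -DETHOS_U55_ENABLED=ON
+make -j4
+```
+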
+See:
+
+- [Building](./sections/building.md)
+  - [Build prerequisites](./sections/building.md#build-prerequisites)
+  - [Build options](./sections/building.md#build-options)
+  - [Build Process](./sections/building.md#build-process)
+    - [Preparing build environment](./sections/building.md#Preparing-build-environment)
+    - [Create a build directory](./sections/building.md#Create-a-build-directory)
+    - [Configuring the build for `MPS3: SSE-300`](./sections/building.md#Configuring-the-build-for-`MPS3:-SSE-300`)
+    - [Configuring build for different Arm Ethos-U55 configurations](./sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations)
+    - [Configuring the build for `MPS3: SSE-200`](./sections/building.md#Configuring-the-build-for-`MPS3:-SSE-200`)
+    - [Configuring the build native unit-test](./sections/building.md#configuring-the-build-native-unit-test)
+    - [Configuring the build for `simple_platform`](./sections/building.md#configuring-the-build-for-`simple_platform`)
+    - [Building the configured project](./sections/building.md#Building-the-configured-project)
+  - [Building timing adapter with custom options](./sections/building.md#building-timing-adapter-with-custom-options)
+  - [Add custom inputs](./sections/building.md#add-custom-inputs)
+  - [Add custom model](./sections/building.md#add-custom-model)
+  - [Optimize custom model with Vela compiler](./sections/building.md#Optimize-custom-model-with-Vela-compiler)
+  - [Memory constraints](./sections/building.md#memory-constraints)
+  - [Automatic file generation](./sections/building.md#automatic-file-generation)
+
+## Deployment
+
+This section describes how to deploy the code sample applications on the Fixed Virtual Platform or the MPS3 board.
+See:
+
+- [Deployment](./sections/deployment.md)
+  - [Fixed Virtual Platform](./sections/deployment.md#fixed-Virtual-Platform)
+    - [Setting up the MPS3 Corstone-300 FVP](./sections/deployment.md#Setting-up-the-MPS3-Corstone-300-FVP)
+    - [Deploying on an FVP emulating MPS3](./sections/deployment.md#Deploying-on-an-FVP-emulating-MPS3)
+  - [MPS3 board](./sections/deployment.md#MPS3-board)
+    - [Deployment on MPS3 board](./sections/deployment.md#Deployment-on-MPS3-board)
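+
+For example, launching a built use case on the Corstone-300 FVP might look like this (a
+sketch, assuming the FVP binary is installed and on your `PATH`; the deployment guide
+covers the exact binary name and options):
+
+```commandline
+FVP_Corstone_SSE-300_Ethos-U55 -a ./bin/ethos-u-kws.axf
+```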
+
+## Running code sample applications
+
+This section covers the process for getting started with pre-built binaries for the code samples.
+See [Running applications](./sections/run.md).
+
+## Implementing custom ML application
+
+This section describes how to implement a custom Machine Learning application running
+on a platform supported by the repository (Fixed Virtual Platform or an MPS3 board).
+
+The Ethos-U55 NPU Code Samples software project offers a simple way to incorporate additional
+use-case code into the existing infrastructure, and provides a build
+system that automatically picks up added functionality and produces a
+corresponding executable for each use-case.
+
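+As a minimal sketch (the use-case name `hello_world` and the file layout are
+illustrative assumptions; the customizing guide below describes the real
+requirements), a new use case starts with a source folder the build system can
+discover:
+
+```commandline
+mkdir -p source/use_case/hello_world/include source/use_case/hello_world/src
+# add sources under src/, headers under include/, plus a usecase.cmake build
+# file, then configure the build with: cmake .. -DUSE_CASE_BUILD=hello_world
+```
+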
+See:
+
+- [Customizing](./sections/customizing.md)
+  - [Software project description](./sections/customizing.md#Software-project-description)
+  - [HAL API](./sections/customizing.md#hal-api)
+  - [Main loop function](./sections/customizing.md#main-loop-function)
+  - [Application context](./sections/customizing.md#application-context)
+  - [Profiler](./sections/customizing.md#Profiler)
+  - [NN Model API](./sections/customizing.md#NN-model-API)
+  - [Adding custom ML use-case](./sections/customizing.md#Adding-custom-ML-use-case)
+  - [Implementing main loop](./sections/customizing.md#Implementing-main-loop)
+  - [Implementing custom NN model](./sections/customizing.md#Implementing-custom-NN-model)
+  - [Executing inference](./sections/customizing.md#executing-inference)
+  - [Printing to console](./sections/customizing.md#printing-to-console)
+  - [Reading user input from console](./sections/customizing.md#reading-user-input-from-console)
+  - [Output to MPS3 LCD](./sections/customizing.md#output-to-MPS3-LCD)
+  - [Building custom use-case](./sections/customizing.md#building-custom-use-case)
+
+## Testing and benchmarking
+
+See [Testing and benchmarking](./sections/testing_benchmarking.md).
+
+## Troubleshooting
+
+See:
+
+- [Troubleshooting](./sections/troubleshooting.md)
+  - [Inference results are incorrect for my custom files](./sections/troubleshooting.md#Inference-results-are-incorrect-for-my-custom-files)
+  - [The application does not work with my custom model](./sections/troubleshooting.md#The-application-does-not-work-with-my-custom-model)
+
+## Appendix
+
+See:
+
+- [Appendix](./sections/appendix.md)
+  - [Cortex-M55 Memory map overview](./sections/appendix.md#cortex-m55-memory-map-overview)
+
+## Contribution guidelines
+
+Contributions are only accepted under the following conditions:
+
+- The contribution has a certified origin and you give us your permission. To manage this process we use the
+  [Developer Certificate of Origin (DCO) V1.1](https://developercertificate.org/).
+  To indicate that contributors agree to the terms of the DCO, it is necessary to "sign off" the
+  contribution by adding a line with your name and e-mail address to every git commit message:
+
+  ```log
+  Signed-off-by: John Doe <john.doe@example.org>
+  ```
+
+  This can be done automatically by adding the `-s` option to your `git commit` command.
+  You must use your real name; no pseudonyms or anonymous contributions are accepted.
+
+- You give permission according to the [Apache License 2.0](../LICENSE_APACHE_2.0.txt).
+
+  In each source file, include the following copyright notice:
+
+  ```copyright
+  /*
+  * Copyright (c) <years additions were made to project> <your name>, Arm Limited. All rights reserved.
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+  ```
+
+### Coding standards and guidelines
+
+This repository follows a set of guidelines, best practices, programming styles and conventions.
+See:
+
+- [Coding standards and guidelines](./sections/coding_guidelines.md)
+  - [Introduction](./sections/coding_guidelines.md#introduction)
+  - [Language version](./sections/coding_guidelines.md#language-version)
+  - [File naming](./sections/coding_guidelines.md#file-naming)
+  - [File layout](./sections/coding_guidelines.md#file-layout)
+  - [Block Management](./sections/coding_guidelines.md#block-management)
+  - [Naming Conventions](./sections/coding_guidelines.md#naming-conventions)
+    - [C++ language naming conventions](./sections/coding_guidelines.md#c_language-naming-conventions)
+    - [C language naming conventions](./sections/coding_guidelines.md#c-language-naming-conventions)
+  - [Layout and formatting conventions](./sections/coding_guidelines.md#layout-and-formatting-conventions)
+  - [Language usage](./sections/coding_guidelines.md#language-usage)
+
+### Code Reviews
+
+Contributions must go through code review. Code reviews are performed through the
+[mlplatform.org Gerrit server](https://review.mlplatform.org). Contributors need to sign up to this
+Gerrit server with their GitHub account credentials.
+In order to be merged, a patch needs to:
+
+- get a "+1 Verified" from the pre-commit job.
+- get a "+2 Code-review" from a reviewer, which means the patch has the final approval.
+
+### Testing
+
+Prior to submitting a patch for review, please make sure that all build variants work and the unit tests pass.
+Contributions go through testing in the continuous integration system. All builds, tests and checks must pass before a
+contribution gets merged to the master branch.
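+
+A sketch of the native unit-test flow (assuming the `kws` use case was built; the test
+binaries are named `<project>-<use_case>-tests` by the build system and land in the
+`bin` directory of the build tree):
+
+```commandline
+mkdir -p build-native && cd build-native
+cmake .. -DTARGET_PLATFORM=native
+make -j4
+./bin/arm_ethos_u55_eval-kws-tests
+```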
+
+## Licenses
+
+The ML embedded application samples are provided under the Apache 2.0 license; see [License Apache 2.0](../LICENSE_APACHE_2.0.txt).
+
+Application input data sample files are provided under their original license:
+
+| Samples | License | Provenance |
+|---------------|---------|---------|
+| [Automatic Speech Recognition Samples](../resources/asr/samples/files.md) | [Creative Commons Attribution 4.0 International Public License](../resources/LICENSE_CC_4.0.txt) | <http://www.openslr.org/12/> |
+| [Image Classification Samples](../resources/img_class/samples/files.md) | [Creative Commons Attribution 1.0](../resources/LICENSE_CC_1.0.txt) | <https://www.pexels.com> |
+| [Keyword Spotting Samples](../resources/kws/samples/files.md) | [Creative Commons Attribution 4.0 International Public License](../resources/LICENSE_CC_4.0.txt) | <http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz> |
+| [Keyword Spotting and Automatic Speech Recognition Samples](../resources/kws_asr/samples/files.md) | [Creative Commons Attribution 4.0 International Public License](../resources/LICENSE_CC_4.0.txt) | <http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz> |
diff --git a/docs/media/APIs_description.png b/docs/media/APIs_description.png
new file mode 100644
index 0000000..57e2b32
--- /dev/null
+++ b/docs/media/APIs_description.png
Binary files differ
diff --git a/docs/media/ASR_preprocessing.png b/docs/media/ASR_preprocessing.png
new file mode 100644
index 0000000..3383a2e
--- /dev/null
+++ b/docs/media/ASR_preprocessing.png
Binary files differ
diff --git a/docs/media/F1.png b/docs/media/F1.png
new file mode 100644
index 0000000..b843e1e
--- /dev/null
+++ b/docs/media/F1.png
Binary files differ
diff --git a/docs/media/F2.png b/docs/media/F2.png
new file mode 100644
index 0000000..ab903e8
--- /dev/null
+++ b/docs/media/F2.png
Binary files differ
diff --git a/docs/media/F3.png b/docs/media/F3.png
new file mode 100644
index 0000000..0effcb7
--- /dev/null
+++ b/docs/media/F3.png
Binary files differ
diff --git a/docs/media/F4.png b/docs/media/F4.png
new file mode 100644
index 0000000..c7f6ac1
--- /dev/null
+++ b/docs/media/F4.png
Binary files differ
diff --git a/docs/media/KWS_preprocessing.png b/docs/media/KWS_preprocessing.png
new file mode 100644
index 0000000..7a6f3fd
--- /dev/null
+++ b/docs/media/KWS_preprocessing.png
Binary files differ
diff --git a/docs/media/fvp.png b/docs/media/fvp.png
new file mode 100644
index 0000000..ca4ffa5
--- /dev/null
+++ b/docs/media/fvp.png
Binary files differ
diff --git a/docs/media/fvpterminal.png b/docs/media/fvpterminal.png
new file mode 100644
index 0000000..ff39152
--- /dev/null
+++ b/docs/media/fvpterminal.png
Binary files differ
diff --git a/docs/media/mps3.png b/docs/media/mps3.png
new file mode 100644
index 0000000..3fb0dff
--- /dev/null
+++ b/docs/media/mps3.png
Binary files differ
diff --git a/docs/media/vela_flow.jpg b/docs/media/vela_flow.jpg
new file mode 100644
index 0000000..1f052ee
--- /dev/null
+++ b/docs/media/vela_flow.jpg
Binary files differ
diff --git a/docs/quick_start.md b/docs/quick_start.md
new file mode 100644
index 0000000..f557c72
--- /dev/null
+++ b/docs/quick_start.md
@@ -0,0 +1,95 @@
+# Quick start example ML application
+
+This is a quick start guide that shows you how to run the keyword spotting example application. The aim of this guide
+is to illustrate the flow of running an application on the evaluation kit rather than to showcase the keyword spotting
+functionality or performance. All use cases in the evaluation kit follow the same steps.
+
+1. Verify you have installed [the required prerequisites](sections/building.md#Build-prerequisites).
+
+2. Clone the Ethos-U55 evaluation kit repository.
+
+    ```commandline
+    git clone "https://review.mlplatform.org/ml/ethos-u/ml-embedded-evaluation-kit"
+    cd ml-embedded-evaluation-kit
+    ```
+
+3. Pull all the external dependencies with the command below:
+
+    ```commandline
+    git submodule update --init
+    ```
+
+4. Next, you need to obtain a neural network model. For the purpose of this quick start guide, we'll use the
+    `ds_cnn_clustered_int8` keyword spotting model from the [Arm public model zoo](https://github.com/ARM-software/ML-zoo);
+    the principle remains the same for all of the other use cases. Download the `ds_cnn_clustered_int8.tflite` model
+    file with the curl command below:
+
+    ```commandline
+    curl -L https://github.com/ARM-software/ML-zoo/blob/master/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8/ds_cnn_clustered_int8.tflite?raw=true --output ds_cnn_clustered_int8.tflite
+    ```
+
+5. [Vela](https://review.mlplatform.org/plugins/gitiles/ml/ethos-u/ethos-u-vela) is an open-source Python tool that converts
+    a TensorFlow Lite for Microcontrollers neural network model into an optimized model that can run on an embedded system
+    containing an Ethos-U55 NPU. It is worth noting that, in order to take full advantage of the capabilities of the NPU, the
+    neural network operators should be [supported by Vela](https://review.mlplatform.org/plugins/gitiles/ml/ethos-u/ethos-u-vela/+/HEAD/SUPPORTED_OPS.md).
+    In this step, you will compile the model with Vela.
+
+    For this step, you need to ensure you have [correctly installed the Vela package](https://pypi.org/project/ethos-u-vela/):
+
+    ```commandline
+    python3 -m venv env
+    source ./env/bin/activate
+    pip install --upgrade pip
+    pip install ethos-u-vela
+    ```
+
+    In the command below, we specify that we are using the Arm® Ethos™-U55 NPU with 128 Multiply-Accumulate units
+    (MAC units) configured for a High End Embedded use case. The [building section](sections/building.md#Optimize-custom-model-with-Vela-compiler)
+    has a more detailed explanation of Vela usage.
+
+    ```commandline
+    vela ds_cnn_clustered_int8.tflite \
+        --accelerator-config=ethos-u55-128 \
+        --block-config-limit=0 \
+        --config scripts/vela/vela.ini \
+        --memory-mode Shared_Sram \
+        --system-config Ethos_U55_High_End_Embedded
+    ```
+
+    An optimized model file for Ethos-U55 is generated in a folder named `output`.
+
+6. Create a `build` folder in the root level of the evaluation kit.
+
+    ```commandline
+    mkdir build && cd build
+    ```
+
+7. Build the makefiles with `CMake` as shown in the command below. The [build process section](sections/building.md#Build-process)
+    gives an in-depth explanation of the meaning of every parameter. For the time being, note that we point to the Vela
+    optimized model from stage 5 with the `-Dkws_MODEL_TFLITE_PATH` parameter.
+
+    ```commandline
+    cmake \
+        -DTARGET_PLATFORM=mps3 \
+        -DTARGET_SUBSYSTEM=sse-300 \
+        -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+        -DUSE_CASE_BUILD=kws \
+        -Dkws_MODEL_TFLITE_PATH=output/ds_cnn_clustered_int8_vela.tflite \
+        ..
+    ```
+
+8. Compile the project with `make`. Details about this stage can be found in the [building part of the documentation](sections/building.md#Building-the-configured-project).
+
+    ```commandline
+    make -j4
+    ```
+
+9. Launch the project as explained [here](sections/deployment.md#Deployment). In this quick-start guide, we'll use the Fixed
+    Virtual Platform. Pass the `bin/ethos-u-kws.axf` file generated in stage 8 to the FVP that you downloaded when
+    installing the prerequisites.
+
+    ```commandline
+    <path_to_FVP>/FVP_Corstone_SSE-300_Ethos-U55 -a ./bin/ethos-u-kws.axf
+    ```
+
+10. A telnet window is launched through which you can interact with the application and obtain performance figures.
diff --git a/docs/sections/appendix.md b/docs/sections/appendix.md
new file mode 100644
index 0000000..7b56faa
--- /dev/null
+++ b/docs/sections/appendix.md
@@ -0,0 +1,20 @@
+# Appendix
+
+## Arm® Cortex®-M55 Memory map overview for Corstone™-300 reference design
+
+The table below is the memory mapping information specific to the Arm® Cortex®-M55.
+
+| Name  | Base address | Limit address |  Size     | IDAU |  Remarks                                                  |
+|-------|--------------|---------------|-----------|------|-----------------------------------------------------------|
+| ITCM  | 0x0000_0000  |  0x0007_FFFF  |   512 kiB |  NS  |   ITCM code region                                        |
+| BRAM  | 0x0100_0000  |  0x0120_0000  |   2 MiB   |  NS  |   FPGA data SRAM region                                   |
+| DTCM  | 0x2000_0000  |  0x2007_FFFF  |  512 kiB  |  NS  |   4 banks of 128 kiB each                                 |
+| SRAM  | 0x2100_0000  |  0x213F_FFFF  |  4 MiB    |  NS  |   2 banks of 2 MiB each as SSE-300 internal SRAM region   |
+| DDR   | 0x6000_0000  |  0x6FFF_FFFF  |   256 MiB |  NS  |   DDR memory region                                       |
+| ITCM  | 0x1000_0000  |  0x1007_FFFF  |   512 kiB |  S   |   ITCM code region                                        |
+| BRAM  | 0x1100_0000  |  0x1120_0000  |   2 MiB   |  S   |   FPGA data SRAM region                                   |
+| DTCM  | 0x3000_0000  |  0x3007_FFFF  |   512 kiB |  S   |   4 banks of 128 kiB each                                 |
+| SRAM  | 0x3100_0000  |  0x313F_FFFF  |   4 MiB   |  S   |   2 banks of 2 MiB each as SSE-300 internal SRAM region   |
+| DDR   | 0x7000_0000  |  0x7FFF_FFFF  |  256 MiB  |  S   |   DDR memory region                                       |
+
+The default memory map can be found here: <https://developer.arm.com/documentation/101051/0002/Memory-model/Memory-map>
\ No newline at end of file
diff --git a/docs/sections/building.md b/docs/sections/building.md
new file mode 100644
index 0000000..56771b8
--- /dev/null
+++ b/docs/sections/building.md
@@ -0,0 +1,1023 @@
+# Building the Code Samples application from sources
+
+## Contents
+
+- [Building the Code Samples application from sources](#building-the-code-samples-application-from-sources)
+  - [Contents](#contents)
+  - [Build prerequisites](#build-prerequisites)
+  - [Build options](#build-options)
+  - [Build process](#build-process)
+    - [Preparing build environment](#preparing-build-environment)
+    - [Create a build directory](#create-a-build-directory)
+    - [Configuring the build for `MPS3: SSE-300`](#configuring-the-build-for-mps3-sse-300)
+    - [Configuring the build for `MPS3: SSE-200`](#configuring-the-build-for-mps3-sse-200)
+    - [Configuring the build native unit-test](#configuring-the-build-native-unit-test)
+    - [Configuring the build for `simple_platform`](#configuring-the-build-for-simple_platform)
+    - [Building the configured project](#building-the-configured-project)
+  - [Building timing adapter with custom options](#building-timing-adapter-with-custom-options)
+  - [Add custom inputs](#add-custom-inputs)
+  - [Add custom model](#add-custom-model)
+  - [Optimize custom model with Vela compiler](#optimize-custom-model-with-vela-compiler)
+  - [Memory constraints](#memory-constraints)
+  - [Automatic file generation](#automatic-file-generation)
+
+This section assumes the use of an **x86 Linux** build machine.
+
+## Build prerequisites
+
+Before proceeding, please make sure that the following prerequisites
+are fulfilled:
+
+- Arm Compiler version 6.14 or above is installed and available on the
+    path.
+
+    Test the compiler by running:
+
+    ```commandline
+    armclang -v
+    ```
+
+    ```log
+    Product: ARM Compiler 6.14 Professional
+    Component: ARM Compiler 6.14
+    ```
+
+    > **Note:** Add compiler to the path, if needed:
+    >
+    > `export PATH=/path/to/armclang/bin:$PATH`
+
+- Compiler license is configured correctly
+
+- CMake version 3.15 or above is installed and available on the path.
+    Test CMake by running:
+
+    ```commandline
+    cmake --version
+    ```
+
+    ```log
+    cmake version 3.16.2
+    ```
+
+    > **Note:** Add cmake to the path, if needed:
+    >
+    > `export PATH=/path/to/cmake/bin:$PATH`
+
+- Python 3.6 or above is installed. Test the Python version by running:
+
+    ```commandline
+    python3 --version
+    ```
+
+    ```log
+    Python 3.6.8
+    ```
+
+- The build system will create a Python virtual environment during the build
+    process. Please make sure that the Python virtual environment module is
+    installed:
+
+    ```commandline
+    python3 -m venv
+    ```
+
+- Make (or MinGW make for Windows):
+
+    ```commandline
+    make --version
+    ```
+
+    ```log
+    GNU Make 4.1
+
+    ...
+    ```
+
+    > **Note:** Add it to the path environment variable, if needed.
+
+- Access to the Internet to download the third party dependencies, specifically: TensorFlow Lite Micro, Arm Ethos-U55
+driver and CMSIS. Instructions for downloading these are listed under [preparing build environment](#preparing-build-environment).
+
+## Build options
+
+The project build system allows the user to specify a custom NN
+model (in `.tflite` format) or input images, and to compile the application binary from
+sources.
+
+The build system uses pre-built TensorFlow Lite for Microcontrollers
+library and Arm® Ethos™-U55 driver libraries from the delivery package.
+
+The build script is parameterized to support different options. Default
+values for build parameters will build the executable compatible with
+the Ethos-U55 Fast Model.
+
+The build parameters are:
+
+- `TARGET_PLATFORM`: Target platform to execute the application on:
+  - `mps3`
+  - `native`
+  - `simple_platform`
+
+- `TARGET_SUBSYSTEM`: Platform target subsystem; this specifies the
+    design implementation for the deployment target. For both, the MPS3
+    FVP and the MPS3 FPGA, this should be left to the default value of
+    SSE-300:
+  - `sse-300` (default - [Arm® Corstone™-300](https://developer.arm.com/ip-products/subsystem/corstone/corstone-300))
+  - `sse-200`
+
+- `TENSORFLOW_SRC_PATH`: Path to the root of the TensorFlow directory.
+    The default value points to the TensorFlow submodule in the
+    [ethos-u](https://git.mlplatform.org/ml/ethos-u/ethos-u.git/about/) `dependencies` folder.
+
+- `ETHOS_U55_DRIVER_SRC_PATH`: Path to the Ethos-U55 core driver sources.
+    The default value points to the core_driver submodule in the
+    [ethos-u](https://git.mlplatform.org/ml/ethos-u/ethos-u.git/about/) `dependencies` folder.
+
+- `CMSIS_SRC_PATH`: Path to the CMSIS sources to be used to build the TensorFlow
+    Lite Micro library. This parameter is optional and valid only for
+    Arm® Cortex®-M CPU targeted configurations. The default value points to the CMSIS submodule in the
+    [ethos-u](https://git.mlplatform.org/ml/ethos-u/ethos-u.git/about/) `dependencies` folder.
+
+- `ETHOS_U55_ENABLED`: Sets whether the use of Ethos-U55 is available for
+    the deployment target. By default, this is set and therefore the
+    application is built with Ethos-U55 support.
+
+- `CPU_PROFILE_ENABLED`: Sets whether profiling information for the CPU
+    core should be displayed. By default, this is set to false, but can
+    be turned on for FPGA targets. For the FVP, the CPU core's cycle
+    counts are not meaningful and should not be used.
+
+- `LOG_LEVEL`: Sets the verbosity level for the application's output
+    over UART/stdout. Valid values are `LOG_LEVEL_TRACE`, `LOG_LEVEL_DEBUG`,
+    `LOG_LEVEL_INFO`, `LOG_LEVEL_WARN` and `LOG_LEVEL_ERROR`. By default, it
+    is set to `LOG_LEVEL_INFO`.
+
+- `<use_case>_MODEL_TFLITE_PATH`: Path to the model file that will be
+    processed and included in the application axf file. The default
+    value points to one of the delivered set of models. Make sure the
+    model chosen is aligned with the `ETHOS_U55_ENABLED` setting.
+
+  - When using the Ethos-U55 backend, the NN model is assumed to have been
+    optimized by the Vela compiler. If it has not, the model will fall back
+    to the CPU and execute there, provided it is supported by TensorFlow
+    Lite Micro.
+
+  - When use of the Ethos-U55 is disabled and a Vela optimized model
+    is provided, the application will report a failure at runtime.
+
+- `USE_CASE_BUILD`: specifies the list of applications to build. By
+    default, the build system scans sources to identify available ML
+    applications and produces executables for all detected use-cases.
+    This parameter can accept a single value, for example,
+    `USE_CASE_BUILD=img_class`, or multiple values, for example,
+    `USE_CASE_BUILD="img_class;kws"`.
+
+- `ETHOS_U55_TIMING_ADAPTER_SRC_PATH`: Path to timing adapter sources.
+    The default value points to the `timing_adapter` dependencies folder.
+
+- `TA_CONFIG_FILE`: Path to the CMake configuration file containing the
+    timing adapter parameters. Used only if the timing adapter build is
+    enabled.
+
+- `TENSORFLOW_LITE_MICRO_CLEAN_BUILD`: Optional parameter to enable/disable
+    "cleaning" prior to building for the TensorFlow Lite Micro library.
+    It is enabled by default.
+
+- `TENSORFLOW_LITE_MICRO_CLEAN_DOWNLOADS`: Optional parameter to enable wiping
+    out TPIP downloads from TensorFlow source tree prior to each build.
+    It is disabled by default.
+
+- `ARMCLANG_DEBUG_DWARF_LEVEL`: When the CMake build type is specified as `Debug`
+    and the armclang toolchain is being used to build for a Cortex-M CPU target,
+    this optional argument can be set to specify the DWARF format.
+    By default, this is set to 4 and is synonymous with passing the `-g`
+    flag to the compiler. This is compatible with Arm-DS and other tools
+    which can interpret the latest DWARF format. To allow debugging using
+    the Model Debugger from the Arm FastModel Tools Suite, this argument can be used
+    to pass the DWARF format version as "3". Note: this option is only available
+    when the CMake project is configured with the `-DCMAKE_BUILD_TYPE=Debug` argument.
+    The same DWARF format is also used when building the TensorFlow Lite Micro library.
+
+> **Note:** For details on the specific use case build options, follow the
+> instructions in the use-case specific documentation.
+> Also, when setting any of the CMake configuration parameters that expect a directory/file path, it is advised
+> to **use absolute paths instead of relative paths**.
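+
+For illustration, a configuration combining several of the options above might look like the
+following (the values shown are examples, not recommendations):
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD="img_class;kws" \
+    -DLOG_LEVEL=LOG_LEVEL_DEBUG ..
+```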
+
+## Build process
+
+The build process can be summarized in three major steps:
+
+- Prepare the build environment by downloading third party sources required, see
+[Preparing build environment](#preparing-build-environment).
+
+- Configure the build for the platform chosen.
+This stage includes:
+  - CMake options configuration
+  - When the `<use_case>_MODEL_TFLITE_PATH` build options are not provided, default neural network models are downloaded
+from [Arm ML-Zoo](https://github.com/ARM-software/ML-zoo/). For a native build, the network's input and output data
+for tests are downloaded as well.
+  - Some files such as neural network models, network's inputs and output labels are automatically converted
+    into C/C++ arrays, see [Automatic file generation](#automatic-file-generation).
+
+- Build the application.\
+During this stage, the application and third party libraries are built, see [Building the configured project](#building-the-configured-project).
+
+### Preparing build environment
+
+Certain third party sources must be present on the development machine for the example sources in this
+repository to link against.
+
+1. [TensorFlow Lite Micro repository](https://github.com/tensorflow/tensorflow)
+2. [Ethos-U55 core driver repository](https://review.mlplatform.org/admin/repos/ml/ethos-u/ethos-u-core-driver)
+3. [CMSIS-5](https://github.com/ARM-software/CMSIS_5.git)
+
+These are part of the [ethos-u repository](https://git.mlplatform.org/ml/ethos-u/ethos-u.git/about/) and set as
+submodules of this project.
+
+To pull the submodules:
+
+```sh
+git submodule update --init
+```
+
+This will download all the required components and place them in a tree like:
+
+```tree
+dependencies
+ └── ethos-u
+     ├── cmsis
+     ├── core_driver
+     ├── tensorflow
+     └── ...
+```
+
+> **NOTE**: The default source paths for the TPIP sources assume the above directory structure, but all of the relevant
+> paths can be overridden by the CMake configuration arguments `TENSORFLOW_SRC_PATH`, `ETHOS_U55_DRIVER_SRC_PATH`
+> and `CMSIS_SRC_PATH`.
+
+### Create a build directory
+
+Create a build directory in the root of the project and navigate inside:
+
+```commandline
+mkdir build && cd build
+```
+
+### Configuring the build for `MPS3: SSE-300`
+
+On Linux, execute the following command to build the application to run
+on the Ethos-U55 when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+Toolchain option `CMAKE_TOOLCHAIN_FILE` points to the toolchain specific
+file to set the compiler and platform specific parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (the Model Debugger, for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 ..
+```
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver and CMSIS are not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` and `CMSIS_SRC_PATH` can be used to configure their location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DCMSIS_SRC_PATH=/my/custom/location/cmsis ..
+```
+
+> **Note:** If re-building with changed parameter values, it is
+highly advised to clean the build directory and re-run the CMake command.
+
+### Configuring the build for `MPS3: SSE-200`
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-200 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-200 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -G "MinGW Makefiles ..
+```
+
+### Configuring the build native unit-test
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=native \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/native-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=native \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/native-toolchain.cmake \
+    -G "MinGW Makefiles" ..
+```
+
+Results of the build will be placed under the `build/bin/` folder:
+
+```tree
+bin
+ ├── dev_ethosu_eval-tests
+ └── ethos-u
+```
+
+### Configuring the build for `simple_platform`
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=simple_platform \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=simple_platform \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -G "MinGW Makefiles" ..
+```
+
+### Building the configured project
+
+If the CMake command succeeds, build the application as follows:
+
+```commandline
+make -j4
+```
+
+or for Windows:
+
+```commandline
+mingw32-make -j4
+```
+
+Add `VERBOSE=1` to see compilation and link details.
+
+Results of the build will be placed under the `build/bin` folder, for
+example:
+
+```tree
+bin
+ ├── ethos-u-<use_case_name>.axf
+ ├── ethos-u-<use_case_name>.htm
+ ├── ethos-u-<use_case_name>.map
+ ├── images-<use_case_name>.txt
+ └── sectors
+        └── <use_case>
+                ├── dram.bin
+                └── itcm.bin
+```
+
+Where, for each implemented use-case under the `source/use_case` directory,
+the following build artefacts will be created:
+
+- `ethos-u-<use case name>.axf`: The built application binary for a ML
+    use case.
+
+- `ethos-u-<use case name>.map`: Information from building the
+    application (e.g. libraries used, what was optimized, location of
+    objects).
+
+- `ethos-u-<use case name>.htm`: Human readable file containing the
+    call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files
+    for loading into different FPGA memory regions.
+
+- `images-<use case name>.txt`: Tells the FPGA which memory regions to
+    use for loading the binaries in the `sectors/` folder.
+
+> **Note:** For the specific use case commands, see the relevant section
+in the use case documentation.
+
+## Building timing adapter with custom options
+
+The sources also contain the configuration for a timing adapter utility
+for the Ethos-U55 driver. The timing adapter allows the platform to simulate user
+provided memory bandwidth and latency constraints.
+
+The timing adapter driver aims to control the behavior of two AXI buses
+used by Ethos-U55. One is for SRAM memory region and the other is for
+flash or DRAM. The SRAM is where intermediate buffers are expected to be
+allocated and therefore, this region can serve frequent R/W traffic
+generated by computation operations while executing a neural network
+inference. The flash or DDR is where we expect to store the model
+weights and therefore, this bus would typically be used only for R/O
+traffic.
+
+It is used for the MPS3 FPGA as well as for the Fast Model environment.
+
+The CMake build framework allows the behavior of each bus to be
+controlled with the following parameters:
+
+- `MAXR`: Maximum number of pending read operations allowed. 0 is
+    inferred as infinite, and the default value is 4.
+
+- `MAXW`: Maximum number of pending write operations allowed. 0 is
+    inferred as infinite, and the default value is 4.
+
+- `MAXRW`: Maximum number of pending read+write operations allowed. 0 is
+    inferred as infinite, and the default value is 8.
+
+- `RLATENCY`: Minimum latency, in cycle counts, for a read operation.
+    This is the duration between ARVALID and RVALID signals. The default
+    value is 50.
+
+- `WLATENCY`: Minimum latency, in cycle counts, for a write operation.
+    This is the duration between WVALID + WLAST and BVALID being
+    de-asserted. The default value is 50.
+
+- `PULSE_ON`: Number of cycles during which addresses are let through.
+    The default value is 5100.
+
+- `PULSE_OFF`: Number of cycles during which addresses are blocked. The
+    default value is 5100.
+
+- `BWCAP`: Maximum number of 64-bit words transferred per pulse cycle. A
+    pulse cycle is PULSE_ON + PULSE_OFF. 0 is inferred as infinite, and
+    the default value is 625.
+
+- `MODE`: Timing adapter operation mode. The default value is 0.
+
+  - Bit 0: 0=simple; 1=latency-deadline QoS throttling of read vs. write
+
+  - Bit 1: 1=enable random AR reordering (0=default)
+
+  - Bit 2: 1=enable random R reordering (0=default)
+
+  - Bit 3: 1=enable random B reordering (0=default)
+
+For the timing adapter's CMake build configuration, the SRAM AXI bus is assigned
+index 0 and the flash/DRAM AXI bus has index 1. To change a bus
+parameter for the build, a `TA<index>_` prefix should be added
+to the parameter name. For example, `TA0_MAXR=10` will set the SRAM AXI bus's
+maximum pending reads to 10.
+
+As an example, if we have the following parameters for flash/DRAM
+region:
+
+- `TA1_MAXR` = "2"
+
+- `TA1_MAXW` = "0"
+
+- `TA1_MAXRW` = "0"
+
+- `TA1_RLATENCY` = "64"
+
+- `TA1_WLATENCY` = "32"
+
+- `TA1_PULSE_ON` = "320"
+
+- `TA1_PULSE_OFF` = "80"
+
+- `TA1_BWCAP` = "50"
+
+For a clock rate of 500 MHz, this would translate to:
+
+- The maximum duty cycle for any operation is:\
+![Maximum duty cycle formula](../media/F1.png)
+
+- Maximum bit rate for this bus (64-bit wide) is:\
+![Maximum bit rate formula](../media/F2.png)
+
+- With a read latency of 64 cycles, and maximum pending reads as 2,
+    each read could be a maximum of 64 or 128 bytes, as defined by the
+    Ethos-U55's AXI bus attributes.
+
+    The bandwidth is calculated solely from the read parameters:\
+    ![Bandwidth formula](../media/F3.png)
+
+    This is higher than the overall bandwidth dictated by the bus parameters
+    of:\
+    ![Overall bandwidth formula](../media/F4.png)
+
+This suggests that the read operation is limited only by the overall bus
+bandwidth.
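+
+As a back-of-the-envelope sketch of the arithmetic (assuming the pictured formulas encode the
+usual duty-cycle and bandwidth relations; the images above remain the authoritative definitions),
+the example values work out approximately as:
+
+```latex
+% Hypothetical reconstruction of the pictured formulas, using the TA1 example values.
+\begin{align*}
+\text{duty cycle}     &= \frac{PULSE\_ON}{PULSE\_ON + PULSE\_OFF} = \frac{320}{320 + 80} = 80\% \\
+\text{max bit rate}   &= \frac{BWCAP \times 64\,\text{bits}}{(PULSE\_ON + PULSE\_OFF) / 500\,\text{MHz}}
+                       = \frac{3200\,\text{bits}}{0.8\,\mu\text{s}} = 4\,\text{Gbit/s} = 500\,\text{MB/s} \\
+\text{read bandwidth} &= \frac{MAXR \times 128\,\text{bytes}}{RLATENCY / 500\,\text{MHz}}
+                       = \frac{256\,\text{B}}{128\,\text{ns}} = 2\,\text{GB/s}
+\end{align*}
+```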
+
+The timing adapter requires recompilation to change parameters. The default timing
+adapter configuration file, pointed to by the `TA_CONFIG_FILE` build parameter, is
+located in the `scripts/cmake` folder and contains all the options for AXI0 and
+AXI1 described above.
+
+An example of `scripts/cmake/ta_config.cmake`:
+
+```cmake
+# Timing adapter options
+set(TA_INTERACTIVE OFF)
+
+# Timing adapter settings for AXI0
+set(TA0_MAXR "8")
+set(TA0_MAXW "8")
+set(TA0_MAXRW "0")
+set(TA0_RLATENCY "32")
+set(TA0_WLATENCY "32")
+set(TA0_PULSE_ON "3999")
+set(TA0_PULSE_OFF "1")
+set(TA0_BWCAP "4000")
+...
+```
+
+An example of the build with custom timing adapter configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTA_CONFIG_FILE=scripts/cmake/my_ta_config.cmake ..
+```
+
+## Add custom inputs
+
+The application performs inference on input data found in the folder set
+by the CMake parameters; for more information, see section 3.3 in the
+specific use case documentation.
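+
+For example, for the image classification use case, a folder of custom images could be supplied
+at configuration time via the `img_class_FILE_PATH` option (following the `<use_case>_FILE_PATH`
+pattern; the path below is a placeholder):
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -Dimg_class_FILE_PATH=/path/to/custom/samples ..
+```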
+
+## Add custom model
+
+The application performs inference using the model pointed to by the
+CMake parameter `<use_case>_MODEL_TFLITE_PATH`.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom
+model has been run through the Vela compiler successfully before continuing.
+
+To run the application with a custom model you will need to provide a
+`labels_<model_name>.txt` file of labels associated with the model.
+Each line of the file should correspond to one of the outputs in your
+model. See the provided `labels_mobilenet_v2_1.0_224.txt` file in the
+`img_class` use case for an example.
+
+Then, you must set `<use_case>_MODEL_TFLITE_PATH` to the location of
+the Vela processed model file and `<use_case>_LABELS_TXT_FILE` to the
+location of the associated labels file:
+
+```commandline
+cmake \
+    -D<use_case>_MODEL_TFLITE_PATH=<path/to/custom_model_after_vela.tflite> \
+    -D<use_case>_LABELS_TXT_FILE=<path/to/labels_custom_model.txt> \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+> **Note:** For the specific use case command see the relative section in the use case documentation.
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The TensorFlow Lite for Microcontrollers model pointed to by `<use_case>_MODEL_TFLITE_PATH` and the
+labels text file pointed to by `<use_case>_LABELS_TXT_FILE` will be
+converted to C++ files during the CMake configuration stage and then
+compiled into the application for performing inference.
+
+The log from the configuration stage should tell you what model path and
+labels file have been used:
+
+```log
+-- User option TARGET_PLATFORM is set to mps3
+-- User option <use_case>_MODEL_TFLITE_PATH is set to
+<path/to/custom_model_after_vela.tflite>
+...
+-- User option <use_case>_LABELS_TXT_FILE is set to
+<path/to/labels_custom_model.txt>
+...
+-- Using <path/to/custom_model_after_vela.tflite>
+++ Converting custom_model_after_vela.tflite to custom_model_after_vela.tflite.cc
+-- Generating labels file from <path/to/labels_custom_model.txt>
+-- writing to <path/to/build>/generated/include/Labels.hpp and <path/to/build>/generated/src/Labels.cc
+...
+```
+
+After compiling, your custom model will have replaced the default
+one in the application.
+
+## Optimize custom model with Vela compiler
+
+> **Note:** This tool is not available within this project.
+It is a Python tool available from <https://pypi.org/project/ethos-u-vela/>.
+The source code is hosted on <https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/>.
+
+The Vela compiler is a tool that can optimize a neural network model
+into a version that can run on an embedded system containing Ethos-U55.
+
+The optimized model will contain custom operators for the sub-graphs of the
+model that can be accelerated by the Ethos-U55; the remaining layers that
+cannot be accelerated are left unchanged and will run on the CPU using
+optimized (CMSIS-NN) or reference kernels provided by the inference
+engine.
+
+After the compilation, the optimized model can only be executed on a
+system with Ethos-U55.
+
+> **Note:** The NN model provided during the build and compiled into the application
+executable binary defines whether the CPU or NPU is used to execute workloads.
+If an unoptimized model is used, inference will run on the Cortex-M CPU.
+
+The Vela compiler accepts parameters to influence the model optimization. The
+model provided within this project has been optimized with
+the following parameters:
+
+```commandline
+vela \
+    --accelerator-config=ethos-u55-128 \
+    --block-config-limit=0 \
+    --config my_vela_cfg.ini \
+    --memory-mode Shared_Sram \
+    --system-config Ethos_U55_High_End_Embedded \
+    <model>.tflite
+```
+
+Where:
+
+- `--accelerator-config`: Specifies the accelerator configuration to use:
+    one of ethos-u55-256, ethos-u55-128, ethos-u55-64 and ethos-u55-32.
+- `--block-config-limit`: Limits the block config search space; use zero for
+    unlimited.
+- `--config`: Specifies the path to the Vela configuration file. The format of the file is a Python ConfigParser `.ini` file.
+    An example can be found at [scripts/vela/vela.ini](../../scripts/vela/vela.ini).
+- `--memory-mode`: Selects the memory mode to use as specified in the Vela configuration file.
+- `--system-config`: Selects the system configuration to use as specified in the Vela configuration file.
+
+The Vela compiler accepts a `.tflite` file as input and saves the optimized network
+model as a new `.tflite` file.
+
+Using `--show-cpu-operations` and `--show-subgraph-io-summary` will show
+all the operations that fall back to the CPU and a summary of all the
+subgraphs and their inputs and outputs.
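+
+For example, the flags can be appended to the compilation command shown above:
+
+```commandline
+vela <model>.tflite \
+    --show-cpu-operations \
+    --show-subgraph-io-summary
+```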
+
+To see the Vela help for all the parameters, use `vela --help`.
+
+Please get in touch with your Arm representative to request access to the
+Vela compiler documentation for more details.
+
+> **Note:** By default, use of the Ethos-U55 is enabled in the CMake configuration.
+This can be changed by setting the `ETHOS_U55_ENABLED` CMake option to a false value, for example `-DETHOS_U55_ENABLED=OFF`.
+
+## Memory constraints
+
+Both the MPS3 Fixed Virtual Platform and the MPS3 FPGA platform share
+the linker script (scatter file) for the SSE-300 design. The design is set
+by the CMake configuration parameter `TARGET_SUBSYSTEM` as described in
+the previous section.
+
+The memory map exposed by this design is presented in the Appendix. This
+can be used as a reference when editing the scatter file, especially to
+make sure that region boundaries are respected. A snippet from the MPS3
+scatter file is presented below:
+
+```
+;---------------------------------------------------------
+; First load region
+;---------------------------------------------------------
+LOAD_REGION_0 0x00000000 0x00080000
+{
+    ;-----------------------------------------------------
+    ; First part of code mem -- 512kiB
+    ;-----------------------------------------------------
+    itcm.bin 0x00000000 0x00080000
+    {
+        *.o (RESET, +First)
+        * (InRoot$$Sections)
+        .ANY (+RO)
+    }
+
+    ;-----------------------------------------------------
+    ; 128kiB of 512kiB bank is used for any other RW or ZI
+    ; data. Note: this region is internal to the Cortex-M CPU
+    ;-----------------------------------------------------
+    dtcm.bin 0x20000000 0x00020000
+    {
+        .ANY(+RW +ZI)
+    }
+
+    ;-----------------------------------------------------
+    ; 128kiB of stack space within the DTCM region
+    ;-----------------------------------------------------
+    ARM_LIB_STACK 0x20020000 EMPTY ALIGN 8 0x00020000
+    {}
+
+    ;-----------------------------------------------------
+    ; 256kiB of heap space within the DTCM region
+    ;-----------------------------------------------------
+
+    ARM_LIB_HEAP 0x20040000 EMPTY ALIGN 8 0x00040000
+    {}
+
+    ;-----------------------------------------------------
+    ; SSE-300's internal SRAM
+    ;-----------------------------------------------------
+    isram.bin 0x21000000 UNINIT ALIGN 16 0x00080000
+    {
+        ; activation buffers a.k.a tensor arena
+        *.o (.bss.NoInit.activation_buf)
+    }
+}
+
+;---------------------------------------------------------
+; Second load region
+;---------------------------------------------------------
+LOAD_REGION_1 0x60000000 0x02000000
+{
+    ;-----------------------------------------------------
+    ; 32 MiB of DRAM space for nn model and input vectors
+    ;-----------------------------------------------------
+    dram.bin 0x60000000 ALIGN 16 0x02000000
+    {
+        ; nn model's baked in input matrices
+        *.o (ifm)
+
+        ; nn model
+        *.o (nn_model)
+
+        ; if the activation buffer (tensor arena) doesn't
+        ; fit in the SRAM region, we accommodate it here
+        *.o (activation_buf)
+    }
+}
+```
+
+It is worth noting that in the bitfile implementation, only the BRAM,
+internal SRAM and DDR memory regions are accessible to the Ethos-U55
+block. In the above snippet, the internal SRAM region can be seen
+to be utilized by activation buffers, with a limit of 512 kiB. If used,
+this region will be written to frequently by the Ethos-U55 block. A bigger
+region of memory for storing the model is placed in the DDR region,
+under LOAD_REGION_1. The two load regions are necessary because the MPS3
+motherboard configuration controller limits the load size at address
+0x00000000 to 512 kiB. This has implications on how the application **is
+deployed** on MPS3, as explained under section 3.8.3.
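+
+For reference, placing a buffer in one of the named sections that the scatter file selects on
+(such as `activation_buf`) is typically done with a section attribute in the application code.
+The following is an illustrative sketch only; the exact attribute spelling and the
+`ACTIVATION_BUF_SZ` macro are assumptions here, not the project's actual source:
+
+```c++
+/* Illustrative sketch: place the tensor arena in the section that the
+ * scatter file maps to the SSE-300 internal SRAM (falling back to DDR
+ * via the activation_buf selector if it does not fit). */
+__attribute__((section(".bss.NoInit.activation_buf"), aligned(16)))
+static uint8_t tensorArena[ACTIVATION_BUF_SZ];
+```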
+
+## Automatic file generation
+
+As mentioned in the previous sections, some files such as neural network
+models, network's inputs, and output labels are automatically converted
+into C/C++ arrays during the CMake project configuration stage.
+Additionally, some code is generated to allow access to these arrays.
+
+An example:
+
+```log
+-- Building use-cases: img_class.
+-- Found sources for use-case img_class
+-- User option img_class_FILE_PATH is set to /tmp/samples
+-- User option img_class_IMAGE_SIZE is set to 224
+-- User option img_class_LABELS_TXT_FILE is set to /tmp/labels/labels_model.txt
+-- Generating image files from /tmp/samples
+++ Converting cat.bmp to cat.cc
+++ Converting dog.bmp to dog.cc
+-- Skipping file /tmp/samples/files.md due to unsupported image format.
+++ Converting kimono.bmp to kimono.cc
+++ Converting tiger.bmp to tiger.cc
+++ Generating /tmp/build/generated/img_class/include/InputFiles.hpp
+-- Generating labels file from /tmp/labels/labels_model.txt
+-- writing to /tmp/build/generated/img_class/include/Labels.hpp and /tmp/build/generated/img_class/src/Labels.cc
+-- User option img_class_ACTIVATION_BUF_SZ is set to 0x00200000
+-- User option img_class_MODEL_TFLITE_PATH is set to /tmp/models/model.tflite
+-- Using /tmp/models/model.tflite
+++ Converting model.tflite to model.tflite.cc
+...
+```
+
+In particular, the build options pointing to the input files (`<use_case>_FILE_PATH`),
+the model (`<use_case>_MODEL_TFLITE_PATH`) and the labels text file (`<use_case>_LABELS_TXT_FILE`)
+are used by Python scripts to generate not only the converted array files,
+but also headers with utility functions.
+
+For example, the generated utility functions for image classification are:
+
+- `build/generated/include/InputFiles.hpp`
+
+```c++
+#ifndef GENERATED_IMAGES_H
+#define GENERATED_IMAGES_H
+
+#include <cstdint>
+
+#define NUMBER_OF_FILES  (2U)
+#define IMAGE_DATA_SIZE  (150528U)
+
+extern const uint8_t im0[IMAGE_DATA_SIZE];
+extern const uint8_t im1[IMAGE_DATA_SIZE];
+
+const char* get_filename(const uint32_t idx);
+const uint8_t* get_img_array(const uint32_t idx);
+
+#endif /* GENERATED_IMAGES_H */
+```
+
+- `build/generated/src/InputFiles.cc`
+
+```c++
+#include "InputFiles.hpp"
+
+static const char *img_filenames[] = {
+    "img1.bmp",
+    "img2.bmp",
+};
+
+static const uint8_t *img_arrays[] = {
+    im0,
+    im1
+};
+
+const char* get_filename(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return img_filenames[idx];
+    }
+    return nullptr;
+}
+
+const uint8_t* get_img_array(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return img_arrays[idx];
+    }
+    return nullptr;
+}
+```
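+
+A use case can then iterate over the baked-in inputs without knowing at compile time how many
+files were provided. A minimal usage sketch based on the generated functions above (the
+surrounding application is assumed to provide `printf` retargeting):
+
+```c++
+#include "InputFiles.hpp"
+
+#include <cstdio>
+
+void ProcessAllInputs()
+{
+    for (uint32_t i = 0; i < NUMBER_OF_FILES; ++i) {
+        /* Both getters return nullptr only for an out-of-range index. */
+        const char* name = get_filename(i);
+        const uint8_t* data = get_img_array(i);
+        printf("Input %u: %s, first byte: 0x%02x\n",
+               static_cast<unsigned>(i), name, data[0]);
+    }
+}
+```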
+
+These headers are generated using Python templates, located in `scripts/py/templates/*.template`:
+
+```tree
+scripts/
+├── cmake
+│   ├── ...
+│   ├── subsystem-profiles
+│   │   ├── corstone-sse-200.cmake
+│   │   └── corstone-sse-300.cmake
+│   ├── templates
+│   │   ├── mem_regions.h.template
+│   │   ├── peripheral_irqs.h.template
+│   │   └── peripheral_memmap.h.template
+│   └── ...
+└── py
+    ├── <generation scripts>
+    ├── requirements.txt
+    └── templates
+        ├── audio.cc.template
+        ├── AudioClips.cc.template
+        ├── AudioClips.hpp.template
+        ├── default.hpp.template
+        ├── header_template.txt
+        ├── image.cc.template
+        ├── Images.cc.template
+        ├── Images.hpp.template
+        ├── Labels.cc.template
+        ├── Labels.hpp.template
+        ├── testdata.cc.template
+        ├── TestData.cc.template
+        ├── TestData.hpp.template
+        └── tflite.cc.template
+```
+
+Based on the type of use case, the correct conversion is invoked in the use case CMake file
+(audio or image, respectively, for voice or vision use cases).
+For example, the generation calls for image classification (`source/use_case/img_class/usecase.cmake`):
+
+```cmake
+# Generate input files
+generate_images_code("${${use_case}_FILE_PATH}"
+                     ${SRC_GEN_DIR}
+                     ${INC_GEN_DIR}
+                     "${${use_case}_IMAGE_SIZE}")
+
+# Generate labels file
+set(${use_case}_LABELS_CPP_FILE Labels)
+generate_labels_code(
+    INPUT           "${${use_case}_LABELS_TXT_FILE}"
+    DESTINATION_SRC ${SRC_GEN_DIR}
+    DESTINATION_HDR ${INC_GEN_DIR}
+    OUTPUT_FILENAME "${${use_case}_LABELS_CPP_FILE}"
+)
+
+...
+
+# Generate model file
+generate_tflite_code(
+    MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+    DESTINATION ${SRC_GEN_DIR}
+)
+```
+
+> **Note:** When required, for models and labels conversion it's possible to add extra parameters such
+> as extra code to put in `<model>.cc` file or namespaces.
+>
+> ```cmake
+> set(${use_case}_LABELS_CPP_FILE Labels)
+> generate_labels_code(
+>     INPUT           "${${use_case}_LABELS_TXT_FILE}"
+>     DESTINATION_SRC ${SRC_GEN_DIR}
+>     DESTINATION_HDR ${INC_GEN_DIR}
+>     OUTPUT_FILENAME "${${use_case}_LABELS_CPP_FILE}"
+>     NAMESPACE       "namespace1" "namespace2"
+> )
+>
+> ...
+>
+> set(EXTRA_MODEL_CODE
+>     "/* Model parameters for ${use_case} */"
+>     "extern const int   g_myvariable2     = value1"
+>     "extern const int   g_myvariable2     = value2"
+> )
+>
+> generate_tflite_code(
+>     MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+>     DESTINATION ${SRC_GEN_DIR}
+>     EXPRESSIONS ${EXTRA_MODEL_CODE}
+>     NAMESPACE   "namespace1" "namespace2"
+> )
+> ```
+
+In addition to the input file conversions, the correct platform/system profile is selected
+(in `scripts/cmake/subsystem-profiles/*.cmake`) based on the `TARGET_SUBSYSTEM` build option,
+and the variables set there are used to generate memory region sizes, base addresses and IRQ numbers,
+which are in turn used to generate the mem_regions.h, peripheral_irqs.h and peripheral_memmap.h headers.
+Templates from `scripts/cmake/templates/*.template` are used to generate these header files.
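+
+As an illustration, the generated `mem_regions.h` might contain definitions along these lines
+(the macro names and values below are hypothetical, loosely based on the memory map in the
+Appendix):
+
+```c++
+/* Hypothetical sketch of generated content; the real header depends on the
+ * subsystem profile selected via TARGET_SUBSYSTEM. */
+#define ITCM_BASE (0x00000000)
+#define ITCM_SIZE (0x00080000) /* 512 kiB */
+#define DTCM_BASE (0x20000000)
+#define DTCM_SIZE (0x00080000) /* 512 kiB */
+```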
+
+After the build, the files generated in the build folder are:
+
+```tree
+build/generated/
+├── bsp
+│   ├── mem_regions.h
+│   ├── peripheral_irqs.h
+│   └── peripheral_memmap.h
+├── <use_case_name1>
+│   ├── include
+│   │   ├── InputFiles.hpp
+│   │   └── Labels.hpp
+│   └── src
+│       ├── <uc1_input_file1>.cc
+│       ├── <uc1_input_file2>.cc
+│       ├── InputFiles.cc
+│       ├── Labels.cc
+│       └── <uc1_model_name>.tflite.cc
+└──  <use_case_name2>
+    ├── include
+    │   ├── InputFiles.hpp
+    │   └── Labels.hpp
+    └── src
+        ├── <uc2_input_file1>.cc
+        ├── <uc2_input_file2>.cc
+        ├── InputFiles.cc
+        ├── Labels.cc
+        └── <uc2_model_name>.tflite.cc
+```
+
+Next section of the documentation: [Deployment](../documentation.md#Deployment).
diff --git a/docs/sections/coding_guidelines.md b/docs/sections/coding_guidelines.md
new file mode 100644
index 0000000..f1813d3
--- /dev/null
+++ b/docs/sections/coding_guidelines.md
@@ -0,0 +1,323 @@
+# Coding standards and guidelines
+
+## Contents
+
+- [Introduction](#introduction)
+- [Language version](#language-version)
+- [File naming](#file-naming)
+- [File layout](#file-layout)
+- [Block Management](#block-management)
+- [Naming Conventions](#naming-conventions)
+  - [C++ language naming conventions](#c_language-naming-conventions)
+  - [C language naming conventions](#c-language-naming-conventions)
+- [Layout and formatting conventions](#layout-and-formatting-conventions)
+- [Language usage](#language-usage)
+
+## Introduction
+
+This document presents the standard coding guidelines to be followed for contributions to this repository. Most of the
+code is written in C++, but some is written in C as well. There is a clear C/C++ boundary at the Hardware
+Abstraction Layer (HAL). The two languages follow different naming conventions within this repository, by design, to:
+
+- have clearly distinguishable C and C++ sources.
+- make cross-language function calls stand out. Mostly these will be C++ function calls to the HAL functions written in C.
+However, because we also issue function calls to third party APIs (and they may not follow these conventions), the
+intended outcome may not be fully realised in all cases.
+
+## Language version
+
+For this project, code written in C++ shall use the C++11 language standard,
+restricted to a subset of its feature set. Code written in C should be compatible
+with the C99 standard.
+
+Software components written in C/C++ may use the language features allowed and encouraged by this documentation.
+
+## File naming
+
+- C files should have `.c` extension
+- C++ files should have `.cc` or `.cpp` extension.
+- Header files for functions implemented in C should have `.h` extension.
+- Header files for functions implemented in C++ should have `.hpp` extension.
+
+## File layout
+
+- Standard copyright notice must be included in all files:
+
+  ```copyright
+  /*
+  * Copyright (c) <years additions were made to project> <your name>, Arm Limited. All rights reserved.
+  * SPDX-License-Identifier: Apache-2.0
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+  ```
+
+- Source lines must be no longer than 120 characters. Prefer to spread code out vertically rather than horizontally,
+  wherever it makes sense:
+
+  ```C++
+  // This is significantly easier to read
+  enum class SomeEnum1
+  {
+      ENUM_VALUE_1,
+      ENUM_VALUE_2,
+      ENUM_VALUE_3
+  };
+
+  // than this
+  enum class SomeEnum2 { ENUM_VALUE_1, ENUM_VALUE_2, ENUM_VALUE_3 };
+  ```
+
+- Block indentation should use 4 characters, no tabs.
+
+- Each statement must be on a separate line.
+
+  ```C++
+  int a, b; // Error prone
+  int c, *d;
+
+  int e = 0; // GOOD
+  int *p = nullptr; // GOOD
+  ```
+
+- Source must not contain commented out code or unreachable code
+
+## Block Management
+
+- Blocks must use braces and braces location must be consistent.
+  - Each function has its opening brace on the next line, at the same indentation level as its header; the code within
+  the braces is indented, and the closing brace at the end is on the same level as the opening.
+  For compactness, if the class/function body is empty, braces are accepted on the same line.
+
+  - Conditional statements and loops, even with just a single-statement body, need to be surrounded by braces. The
+opening brace is on the same line, and the closing brace is on the next line at the same indentation level as its header;
+the same rule applies to classes.
+
+    ```C++
+    class Class1 {
+    public:
+        Class1();
+    private:
+        int element;
+    };
+
+    void NotEmptyFunction()
+    {
+        if (condition) {
+            // [...]
+        } else {
+            // [...]
+        }
+        // [...]
+        for(start_cond; end_cond; step_cond) {
+            // [...]
+        }
+    }
+
+    void EmptyFunction() {}
+    ```
+
+  - Cases within a switch are indented and enclosed in braces:
+
+    ```C++
+    switch (option)
+    {
+        case 1:
+        {
+            // handle option 1
+            break;
+        }
+        case 2:
+        {
+            // handle option 2
+            break;
+        }
+        default:
+        {
+            break;
+        }
+    }
+    ```
+
+## Naming Conventions
+
+### C++ language naming conventions
+
+- Type (class, struct, enum) and function names must be `PascalCase`:
+
+  ```C++
+  class SomeClass
+  {
+      // [...]
+  };
+  void SomeFunction()
+  {
+      // [...]
+  }
+  ```
+
+- Variables and parameter names must be `camelCase`:
+
+  ```C++
+  int someVariable;
+
+  void SomeFunction(int someParameter) {}
+  ```
+
+- Macros, pre-processor definitions, and enumeration values should use upper case names:
+
+  ```C++
+  #define SOME_DEFINE
+
+  enum class SomeEnum
+  {
+      ENUM_VALUE_1,
+      ENUM_VALUE_2
+  };
+  ```
+
+- Namespace names must be lower case:
+
+  ```C++
+  namespace nspace
+  {
+  void FunctionInNamespace();
+  };
+  ```
+
+- Source code should use Hungarian notation to annotate the name of a variable with information about its meaning.
+
+  | Prefix | Class | Description |
+  | ------ | ----- | ----------- |
+  | p | Type      | Pointer to any other type |
+  | k | Qualifier | Constant |
+  | v | Qualifier | Volatile |
+  | m | Scope     | Member of a class or struct |
+  | s | Scope     | Static |
+  | g | Scope     | Used to indicate variable has scope beyond the current function: file-scope or externally visible scope|
+
+The following examples of Hungarian notation show one possible set of uses:
+
+  ```C++
+  int g_GlobalInt = 123;
+  char* m_pNameOfMemberPointer = nullptr;
+  const float g_kSomeGlobalConstant = 1.234f;
+  static float ms_MyStaticMember = 4.321f;
+  bool myLocalVariable = true;
+  ```
+
+### C language naming conventions
+
+For C sources, we follow the Linux variant of the K&R style wherever possible.
+
+- For function and variable names we use `snake_case` convention:
+
+  ```C
+  int some_variable;
+
+  void some_function(int some_parameter) {}
+  ```
+
+- Macros, pre-processor definitions, and enumeration values should use upper case names:
+
+  ```C
+  #define SOME_DEFINE
+
+  enum some_enum
+  {
+      ENUM_VALUE_1,
+      ENUM_VALUE_2
+  };
+  ```
+
+## Layout and formatting conventions
+
+- C++ class code layout:
+  Public function definitions should be at the top of a class definition, since they are the things most likely to be used
+by other people.
+  Private functions and member variables should be last.
+  Class functions and member variables should be laid out logically in blocks of related functionality.
+
+- Class access specifier keywords are not indented.
+
+  ```C++
+  class MyClass
+  {
+  public:
+    int m_PublicMember;
+  protected:
+    int m_ProtectedMember;
+  private:
+    int m_PrivateMember;
+  };
+  ```
+
+- Don't leave trailing spaces at the end of lines.
+
+- Empty lines should have no trailing spaces.
+
+- For pointers and references, the symbols `*` and `&` should be adjacent to the name of the type, not the name
+  of the variable.
+
+  ```C++
+  const char* someText = "abc";
+
+  void SomeFunction(const SomeObject& someObject) {}
+  ```
+
+## Language usage
+
+- Header `#include` statements should be minimized.
+  Inclusion of unnecessary headers slows down compilation, and can hide errors where a function calls a
+  subroutine which it should not be using if the unnecessary header defining this subroutine is included.
+
+  Header statements should be included in the following order:
+
+  - Header file corresponding to the current source file (if applicable)
+  - Headers from the same component
+  - Headers from other components
+  - Third-party headers
+  - System headers
+
+  > **Note:** Leave one blank line between each of these groups for readability.
+  > Use quotes for headers from within the same project and angle brackets for third-party and system headers.
+  > Do not use paths relative to the current source file, such as `../Header.hpp`. Instead, configure your include paths
+  > in the project makefiles.
+
+  ```C++
+  #include "ExampleClass.hpp"     // Own header
+
+  #include "Header1.hpp"          // Header from same component
+  #include "Header1.hpp"          // Header from same component
+
+  #include "other/Header3.hpp"    // Header from other component
+
+  #include <ThirdParty.hpp>       // Third-party headers
+
+  #include <vector>               // System header
+
+  // [...]
+  ```
+
+- C++ casts should use the template-styled cast syntax:
+
+  ```C++
+  int a = 100;
+  float b = (float)a; // Not OK
+  float c = static_cast<float>(a); // OK
+  ```
+
+- Use the `const` keyword to declare constants instead of `#define`.
+
+- Use `nullptr` instead of `NULL`.
+  C++11 introduced the `nullptr` type to distinguish null pointer constants from the integer 0.
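+
+  As a short illustration of the last two rules (the names below are arbitrary examples):
+
+  ```C++
+  #define MAX_SIZE 100            // Not OK: untyped pre-processor define
+  const int kMaxSize = 100;       // OK: typed constant ('k' prefix per the conventions above)
+
+  char* pLegacyText = NULL;       // Not OK
+  char* pModernText = nullptr;    // OK
+  ```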
diff --git a/docs/sections/customizing.md b/docs/sections/customizing.md
new file mode 100644
index 0000000..e92c327
--- /dev/null
+++ b/docs/sections/customizing.md
@@ -0,0 +1,731 @@
+# Implementing custom ML application
+
+- [Software project description](#software-project-description)
+- [HAL API](#hal-api)
+- [Main loop function](#main-loop-function)
+- [Application context](#application-context)
+- [Profiler](#profiler)
+- [NN Model API](#nn-model-api)
+- [Adding custom ML use case](#adding-custom-ml-use-case)
+- [Implementing main loop](#implementing-main-loop)
+- [Implementing custom NN model](#implementing-custom-nn-model)
+- [Executing inference](#executing-inference)
+- [Printing to console](#printing-to-console)
+- [Reading user input from console](#reading-user-input-from-console)
+- [Output to MPS3 LCD](#output-to-mps3-lcd)
+- [Building custom use case](#building-custom-use-case)
+
+This section describes how to implement a custom Machine Learning
+application running on Fast Model FVP or on the Arm MPS3 FPGA prototyping board.
+
+Arm® Ethos™-U55 code sample software project offers a simple way to incorporate
+additional use-case code into the existing infrastructure and provides a build
+system that automatically picks up added functionality and produces corresponding
+executable for each use-case. This is achieved by following certain configuration
+and code implementation conventions.
+
+The following sign will indicate the important conventions to apply:
+
+> **Convention:** The code is developed using the C++11 and C99 standards.
+This is governed by the TensorFlow Lite for Microcontrollers framework.
+
+## Software project description
+
+As mentioned in the [Repository structure](../documentation.md#repository-structure) section, project sources are:
+
+```tree
+├── docs
+│ ├── ...
+│ └── Documentation.md
+├── resources
+│ └── img_class
+│      └── ...
+├── scripts
+│ └── ...
+├── source
+│ ├── application
+│ │ ├── hal
+│ │ ├── main
+│ │ └── tensorflow-lite-micro
+│ └── use_case
+│     └──img_class
+├── CMakeLists.txt
+└── Readme.md
+```
+
+Where `source` contains C/C++ sources for the platform and ML applications.
+Common code related to the Ethos-U55 code samples software
+framework resides in the *application* subfolder, and ML application specific logic (use-case)
+sources are in the *use_case* subfolder.
+
+> **Convention**: Separate use-cases must be organized in subfolders under the use_case folder.
+The name of the directory is used as the name for this use-case and can be provided
+as a `USE_CASE_BUILD` parameter value.
+The build system expects the sources for the use-case to be structured as follows:
+headers in an `include` directory, C/C++ sources in a `src` directory.
+For example:
+>
+>```tree
+>use_case
+> └──img_class
+>       ├── include
+>       │   └── *.hpp
+>       └── src
+>           └── *.cc
+>```
+
+## HAL API
+
+The hardware abstraction layer is represented by the following interfaces.
+To access them, include the hal.h header.
+
+- *hal_platform* structure:\
+    Structure that defines a platform context to be used by the application
+
+  |  Attribute name    | Description                                                                                                                                                         |
+  |--------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+  |  inited            |  Initialization flag. It is set after the platform_init() function is called.                                                                      |
+  |  plat_name         |  Platform name. It is set to "mps3-bare" for MPS3 build and "FVP" for Fast Model build.                                                            |
+  |  data_acq          |  Pointer to data acquisition module responsible for user interaction and other data collection for the application logic.                                           |
+  |  data_psn          |  Pointer to data presentation module responsible for data output through components available in the selected platform: LCD -- for MPS3, console -- for Fast Model. |
+  |  timer             |  Pointer to platform timer implementation (see platform_timer)                                                                                                      |
+  |  platform_init     |  Pointer to platform initialization function.                                                                                                                       |
+  |  platform_release  |  Pointer to platform release function                                                                                                                               |
+
+- *hal_init* function:\
+    Initializes the HAL structure based on compile time config. This
+    should be called before any other function in this API.
+
+  |  Parameter name  | Description|
+  |------------------|-----------------------------------------------------|
+  |  platform        | Pointer to a pre-allocated *hal_platform* struct.   |
+  |  data_acq        | Pointer to a pre-allocated data acquisition module  |
+  |  data_psn        | Pointer to a pre-allocated data presentation module |
+  |  timer           | Pointer to a pre-allocated timer module             |
+  |  return          | zero if successful, error code otherwise            |
+
+- *hal_platform_init* function:\
+  Initializes the HAL platform and all the modules on the platform the
+  application requires to run.
+
+  | Parameter name  | Description                                                         |
+  | ----------------| ------------------------------------------------------------------- |
+  | platform        | Pointer to a pre-allocated and initialized *hal_platform* struct.   |
+  | return          | zero if successful, error code otherwise.                           |
+
+- *hal_platform_release* function\
+  Releases the HAL platform. This should release resources acquired.
+
+  | Parameter name  | Description                                                         |
+  | ----------------| ------------------------------------------------------------------- |
+  |  platform       | Pointer to a pre-allocated and initialized *hal_platform* struct.   |
+
+- *data_acq_module* structure:\
+  Structure to encompass the data acquisition module and its
+  methods.
+
+  | Attribute name | Description                                        |
+  |----------------|----------------------------------------------------|
+  | inited         | Initialization flag. It is set after the system_init() function is called. |
+  | system_name    | Channel name. It is set to "UART" for MPS3 and Fast Model builds. |
+  | system_init    | Pointer to data acquisition module initialization function. The pointer is set according to the platform selected during the build. This function is called by the platform initialization routines. |
+  | get_input      | Pointer to a function reading user input. The pointer is set according to the selected platform during the build. For MPS3 and Fast Model environments, the function reads data from UART. |
+
+- *data_psn_module* structure:\
+  Structure to encompass the data presentation module and its methods.
+
+  | Attribute name     | Description                                    |
+  |--------------------|------------------------------------------------|
+  | inited             | Initialization flag. It is set after the system_init() function is called. |
+  | system_name        | System component name used to present data. It is set to "lcd" for the MPS3 build and to "log_psn" for the Fast Model build. In the case of Fast Model, all pixel drawing functions are replaced by console output of the data summary. |
+  | system_init        | Pointer to data presentation module initialization function. The pointer is set according to the platform selected during the build. This function is called by the platform initialization routines. |
+  | present_data_image | Pointer to a function to draw an image. The pointer is set according to the selected platform during the build. For MPS3, the image will be drawn on the LCD; for Fast Model, an image summary will be printed to the UART (coordinates, channel info, downsample factor). |
+  | present_data_text  | Pointer to a function to print text. The pointer is set according to the selected platform during the build. For MPS3, the text will be drawn on the LCD; for Fast Model, the text will be printed to the UART. |
+  | present_box        | Pointer to a function to draw a rectangle. The pointer is set according to the selected platform during the build. For MPS3, the rectangle will be drawn on the LCD; for Fast Model, a summary will be printed to the UART. |
+  | clear              | Pointer to a function to clear the output. The pointer is set according to the selected platform during the build. For MPS3, the function will clear the LCD; for Fast Model, it will do nothing. |
+  | set_text_color     | Pointer to a function to set the text color for the next call of the present_data_text() function. The pointer is set according to the selected platform during the build. For MPS3, the function will set the color for the text printed on the LCD; for Fast Model, it will do nothing. |
+  | set_led            | Pointer to a function controlling an LED (led_num), switching it on or off. |
+
+- *platform_timer* structure:\
+    Structure to hold a platform specific timer implementation.
+
+  | Attribute name     | Description                                    |
+  |--------------------|------------------------------------------------|
+  |  inited            |  Initialization flag. It is set after the timer is initialized by the *hal_platform_init* function. |
+  |  reset             |  Pointer to a function to reset the timer. |
+  |  get_time_counter  |  Pointer to a function to get the current time counter. |
+  |  get_duration_ms   |  Pointer to a function to calculate the duration between two time-counters in milliseconds. |
+  |  get_duration_us   |  Pointer to a function to calculate the duration between two time-counters in microseconds. |
+  |  get_npu_cycle_diff |  Pointer to a function to calculate the duration between two time-counters in Ethos-U55 cycles. Available only when the project is configured with ETHOS_U55_ENABLED set. |
+
+Example of the API initialization in the main function:
+
+```c++
+#include "hal.h"
+
+int main()
+{
+    hal_platform platform;
+    data_acq_module dataAcq;
+    data_psn_module dataPsn;
+    platform_timer timer;
+
+    /* Initialise the HAL and platform */
+    hal_init(&platform, &dataAcq, &dataPsn, &timer);
+    hal_platform_init(&platform);
+
+    /* ... application logic ... */
+
+    hal_platform_release(&platform);
+
+    return 0;
+}
+```
+
+## Main loop function
+
+The code sample application's main function delegates the use-case
+logic execution to the main loop function, which must be implemented for
+each custom ML scenario.
+
+The main loop function takes a reference to the initialized *hal_platform*
+structure as an argument.
+
+The main loop function has external linkage, and the main executable for the
+use-case will reference the function defined in the use-case
+code.
+
+```c++
+void main_loop(hal_platform& platform) {
+
+    /* ... use-case logic ... */
+
+}
+```
+
+## Application context
+
+The application context can be used to hold state between main
+loop iterations. Include the AppContext.hpp header to use the ApplicationContext class.
+
+| Method name  | Description                                                     |
+|--------------|-----------------------------------------------------------------|
+|  Set         |  Saves given value as a named attribute in the context.         |
+|  Get         |  Gets the saved attribute from the context by the given name.   |
+|  Has         |  Checks if an attribute with a given name exists in the context. |
+
+For example:
+
+```c++
+#include "hal.h"
+#include "AppContext.hpp"
+
+void main_loop(hal_platform& platform) {
+
+    /* Instantiate application context */
+    arm::app::ApplicationContext caseContext;
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<uint32_t>("counter", 0);
+
+    /* loop */
+    while (true) {
+        /* Do something; pass the application context down the call stack. */
+    }
+}
+```
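+
+Attributes stored in the context can be retrieved further down the call stack.
+A minimal sketch, assuming the `Set` calls from the example above:
+
+```c++
+void process(arm::app::ApplicationContext& ctx) {
+    /* Retrieve the platform reference stored by main_loop. */
+    auto& platform = ctx.Get<hal_platform&>("platform");
+
+    /* Update the counter if it has been set. */
+    if (ctx.Has("counter")) {
+        auto counter = ctx.Get<uint32_t>("counter");
+        ctx.Set<uint32_t>("counter", counter + 1);
+    }
+}
+```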
+
+## Profiler
+
+The Profiler is a helper class that assists in collecting timings and
+Ethos-U55 cycle counts for operations. It uses the platform timer to get
+system timing information.
+
+| Method name          | Description                                               |
+|----------------------|-----------------------------------------------------------|
+|  StartProfiling      | Starts profiling and records the starting timing data.    |
+|  StopProfiling       | Stops profiling and records the ending timing data.       |
+|  Reset               | Resets the profiler and clears all collected data.        |
+|  GetResultsAndReset  | Gets the results as a string and resets the profiler.     |
+
+Usage example:
+
+```c++
+Profiler profiler{&platform, "Inference"};
+
+profiler.StartProfiling();
+// Code running inference to profile
+profiler.StopProfiling();
+
+info("%s\n", profiler.GetResultsAndReset().c_str());
+```
+
+## NN Model API
+
+Model (which refers to a neural network model) is an abstract class wrapping
+the underlying TensorFlow Lite Micro API. It provides methods to perform
+common operations such as TensorFlow Lite Micro framework initialization,
+inference execution, and accessing input and output tensor objects.
+
+To use this abstraction, include the TensorFlowLiteMicro.hpp header.
+
+| Method name              | Description                                                                  |
+|--------------------------|------------------------------------------------------------------------------|
+|  GetInputTensor          |   Returns the pointer to the model's input tensor.                           |
+|  GetOutputTensor         |   Returns the pointer to the model's output tensor.                          |
+|  GetType                 |   Returns the model's data type.                                             |
+|  GetInputShape           |   Returns the pointer to the model's input shape.                            |
+|  GetOutputShape          |   Returns the pointer to the model's output shape.                           |
+|  LogTensorInfo           |   Logs the tensor information to stdout for the given tensor pointer: tensor name, tensor address, tensor type, tensor memory size and quantization params.  |
+|  LogInterpreterInfo      |   Logs the interpreter information to stdout.                                |
+|  Init                    |   Initializes the TensorFlow Lite Micro framework, allocates required memory for the model. |
+|  IsInited                |  Checks if this model object has been initialized.                           |
+|  IsDataSigned            |  Checks if the model uses signed data type.                                  |
+|  RunInference            |  Runs the inference (invokes the interpreter).                               |
+|  GetOpResolver()         |  Returns the reference to the TensorFlow Lite Micro operator resolver.       |
+|  EnlistOperations        |  Registers required operators with TensorFlow Lite Micro operator resolver.  |
+|  GetTensorArena          |  Returns pointer to memory region to be used for tensors allocations.        |
+|  GetActivationBufferSize |  Returns the size of the tensor arena memory region.                         |
+
+> **Convention**: Each ML use-case must extend this class and implement the protected virtual methods:
+>
+>```c++
+>virtual const tflite::MicroOpResolver& GetOpResolver() = 0;
+>virtual bool EnlistOperations() = 0;
+>virtual uint8_t* GetTensorArena() = 0;
+>virtual size_t GetActivationBufferSize() = 0;
+>```
+>
+>Network models have different sets of operators that must be registered with
+the tflite::MicroMutableOpResolver object in the EnlistOperations method.
+Network models could also require different activation buffer sizes; the buffer
+is returned as tensor arena memory for the TensorFlow Lite Micro framework by
+the GetTensorArena and GetActivationBufferSize methods.
+
+Please see the MobileNetModel.hpp and MobileNetModel.cc files from the image
+classification ML application use-case as an example of extending the model
+base class. A sketch of the tensor arena methods is shown below.
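+
+For instance, the tensor arena methods can be backed by a statically allocated
+buffer. A minimal sketch, for a hypothetical `MyModel` class extending Model
+(the buffer name and size are illustrative):
+
+```c++
+/* Statically allocated tensor arena; 16-byte alignment is a common
+ * requirement for TensorFlow Lite Micro tensor arenas. The size is
+ * illustrative only. */
+static uint8_t tensorArena[0x00200000] __attribute__((aligned(16)));
+
+uint8_t* arm::app::MyModel::GetTensorArena() {
+    return tensorArena;
+}
+
+size_t arm::app::MyModel::GetActivationBufferSize() {
+    return sizeof(tensorArena);
+}
+```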
+
+## Adding custom ML use case
+
+This section describes how to implement an additional use-case and compile
+it into a binary executable to run with the Fast Model or the MPS3 FPGA board.
+It covers the common major steps: application main loop creation,
+description of the NN model, and inference execution.
+
+In addition, a few useful examples are provided: reading user input,
+printing to the console, and drawing images on the MPS3 LCD.
+
+```tree
+use_case
+   └──hello_world
+      ├── include
+      └── src
+```
+
+Start by creating a sub-directory under the *use_case* directory with
+two other directories, *src* and *include*, as shown above and described in the
+[Software project description](#software-project-description) section.
+
+## Implementing main loop
+
+The use-case main loop is the place to put the use-case's main logic. Essentially,
+it is an infinite loop that reacts to user input, triggers use-case
+conditional logic based on the input, and presents results back to the
+user. However, it could also be simple logic that runs a single inference
+and then exits.
+
+Main loop has knowledge about the platform and has access to the
+platform components through the hardware abstraction layer (referred to as HAL).
+
+Create a *MainLoop.cc* file in the *src* directory (the one created under
+[Adding custom ML use case](#adding-custom-ml-use-case)); the file name is not
+important. Define the *main_loop* function with the signature described in
+[Main loop function](#main-loop-function):
+
+```c++
+#include "hal.h"
+
+void main_loop(hal_platform& platform) {
+  printf("Hello world!");
+}
+```
+
+The above is already a working use-case. If you compile and run it (see
+[Building custom use case](#building-custom-use-case)), the application will start, print the
+message to the console, and exit straight away.
+
+Now, you can start filling this function with logic.
+
+## Implementing custom NN model
+
+Before an inference can be run with a custom NN model, the TensorFlow Lite
+Micro framework must learn about the operators/layers included in the
+model. The developer must register operators using the *MicroMutableOpResolver*
+API.
+
+The Ethos-U55 code samples project has an abstraction around the TensorFlow
+Lite Micro API (see [NN Model API](#nn-model-api)). Create *HelloWorldModel.hpp* in
+the use-case include sub-directory, extend the Model abstract class and
+declare the required methods.
+
+For example:
+
+```c++
+#include "Model.hpp"
+
+namespace arm {
+namespace app {
+
+class HelloWorldModel: public Model {
+  protected:
+    /** @brief   Gets the reference to op resolver interface class. */
+    const tflite::MicroOpResolver& GetOpResolver() override;
+
+    /** @brief   Adds operations to the op resolver instance. */
+    bool EnlistOperations() override;
+
+    /** @brief   Gets a pointer to the model data. */
+    const uint8_t* ModelPointer() override;
+
+    /** @brief   Gets the model size in bytes. */
+    size_t ModelSize() override;
+
+  private:
+    /* Maximum number of individual operations that can be enlisted. */
+    static constexpr int _m_maxOpCnt = 5;
+
+    /* A mutable op resolver instance. */
+    tflite::MicroMutableOpResolver<_m_maxOpCnt> _m_opResolver;
+};
+} /* namespace app */
+} /* namespace arm */
+```
+
+Create a `HelloWorldModel.cc` file in the `src` sub-directory and define the methods
+there. Include the `HelloWorldModel.hpp` created earlier. Note that `Model.hpp`
+included in the header provides access to TensorFlow Lite Micro's operation
+resolver API.
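+
+For instance, the op resolver accessor can simply return the resolver member
+declared in the header. A minimal sketch, assuming the `HelloWorldModel`
+declaration above:
+
+```c++
+#include "HelloWorldModel.hpp"
+
+const tflite::MicroOpResolver& arm::app::HelloWorldModel::GetOpResolver() {
+    /* Return the mutable resolver declared in HelloWorldModel.hpp. */
+    return this->_m_opResolver;
+}
+```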
+
+Please see `use_case/img_class/src/MobileNetModel.cc` for
+code examples.\
+If you are using a TensorFlow Lite model compiled with Vela, it is important to add
+custom Ethos-U55 operator to the operators list.
+
+The following example shows how to add the custom Ethos-U55 operator with the
+TensorFlow Lite Micro framework. The ARM_NPU define is used to exclude the
+code if the application was built without NPU support.
+
+```c++
+#include "HelloWorldModel.hpp"
+
+bool arm::app::HelloWorldModel::EnlistOperations() {
+
+  #if defined(ARM_NPU)
+    if (kTfLiteOk == this->_m_opResolver.AddEthosU()) {
+        info("Added %s support to op resolver\n",
+            tflite::GetString_ETHOSU());
+    } else {
+        printf_err("Failed to add Arm NPU support to op resolver.");
+        return false;
+    }
+  #endif /* ARM_NPU */
+
+    return true;
+}
+```
+
+To minimize the application memory footprint, it is advised to register only
+the operators used by the NN model.
+
+Define the `ModelPointer` and `ModelSize` methods. These functions are wrappers around the
+functions generated in the C++ file containing the neural network model as an array.
+The generation of this C++ array from the .tflite file needs to be defined in
+the `usecase.cmake` file for this `HelloWorld` example.
+
+For more details on `usecase.cmake`, see [Building custom use case](#building-custom-use-case).
+For details on code generation flow in general, see [Automatic file generation](./building.md#Automatic-file-generation)
+
+The TensorFlow Lite model data is read during the Model::Init() method execution; see
+*application/tensorflow-lite-micro/Model.cc* for more details. Model invokes the
+`ModelPointer()` function, which calls the `GetModelPointer()` function to get the
+neural network model data memory address. The `GetModelPointer()` function
+is generated during the build and can be found in the
+file `build/generated/hello_world/src/<model_file_name>.cc`. The generated
+file is added to the compilation automatically.
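+
+A minimal sketch of the two wrappers is below. The size accessor name
+`GetModelLen()` is an assumption made here for illustration; check the
+generated file for the actual symbol names.
+
+```c++
+#include "HelloWorldModel.hpp"
+
+/* Provided by the file generated from the .tflite model during the build.
+ * GetModelLen() is an assumed name for the size accessor. */
+extern const uint8_t* GetModelPointer();
+extern size_t GetModelLen();
+
+const uint8_t* arm::app::HelloWorldModel::ModelPointer() {
+    return GetModelPointer();
+}
+
+size_t arm::app::HelloWorldModel::ModelSize() {
+    return GetModelLen();
+}
+```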
+
+Use the `${use_case}_MODEL_TFLITE_PATH` build parameter to include a custom
+model in the generation/compilation process (see [Build options](./building.md/#build-options)).
+
+## Executing inference
+
+To run an inference successfully, you need:
+
+- a TensorFlow Lite model file,
+- an extended Model class,
+- a place to add the code to invoke inference,
+- a main loop function,
+- and some input data.
+
+For the hello_world example below, the input array is not populated.
+However, for real-world scenarios, this data should either be read from
+an on-board device or be prepared in the form of C++ sources before
+compilation and be baked into the application.
+
+For example, the image classification application has extra build steps
+to generate C++ sources from the provided images with
+*generate_images_code* CMake function.
+
+> **Note:**
+Check that the input data type for your NN model and the input array data type are the same.
+For example, generated C++ sources for images store image data as a uint8 array. For models that were
+quantized to an int8 data type, it is important to convert image data to int8 correctly before inference execution.
+Asymmetric to symmetric data type conversion involves re-positioning the zero value, i.e. subtracting an
+offset from the uint8 values. Please check the image classification application source for a code example
+(the ConvertImgToInt8 function); a sketch is also given below.
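+
+A minimal sketch of such a conversion, assuming the usual offset of 128 for
+uint8-quantized data (see the ConvertImgToInt8 function in the image
+classification sources for the actual implementation):
+
+```c++
+#include <cstddef>
+#include <cstdint>
+
+/* Converts uint8 data to int8 in place by subtracting the 128 offset. */
+static void ConvertToInt8(void* data, size_t numBytes) {
+    auto* unsignedData = static_cast<uint8_t*>(data);
+    auto* signedData   = static_cast<int8_t*>(data);
+
+    for (size_t i = 0; i < numBytes; ++i) {
+        signedData[i] = static_cast<int8_t>(
+            static_cast<int32_t>(unsignedData[i]) - 128);
+    }
+}
+```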
+
+The following code adds inference invocation to the main loop function:
+
+```c++
+#include "hal.h"
+#include "HelloWorldModel.hpp"
+
+void main_loop(hal_platform& platform) {
+
+  /* model wrapper object */
+  arm::app::HelloWorldModel model;
+
+  /* Load the model */
+  if (!model.Init()) {
+    printf_err("failed to initialise model\n");
+    return;
+  }
+
+  TfLiteTensor *outputTensor = model.GetOutputTensor();
+  TfLiteTensor *inputTensor = model.GetInputTensor();
+
+  /* Dummy input data. */
+  uint8_t inputData[1000];
+
+  memcpy(inputTensor->data.data, inputData, 1000);
+
+  /* run inference */
+  model.RunInference();
+
+  const uint32_t tensorSz = outputTensor->bytes;
+  const uint8_t* outputData = tflite::GetTensorData<uint8_t>(outputTensor);
+}
+```
+
+The code snippet has several important blocks:
+
+- Creating HelloWorldModel object and initializing it.
+
+  ```c++
+  arm::app::HelloWorldModel model;
+
+  /* Load the model */
+  if (!model.Init()) {
+    printf_err(\"failed to initialise model\\n\");
+    return;
+  }
+  ```
+
+- Getting pointers to allocated input and output tensors.
+
+  ```c++
+  TfLiteTensor *outputTensor = model.GetOutputTensor();
+  TfLiteTensor *inputTensor = model.GetInputTensor();
+  ```
+
+- Copying input data to the input tensor. We assume the input tensor size
+  to be 1000 uint8 elements.
+
+  ```c++
+  memcpy(inputTensor->data.data, inputData, 1000);
+  ```
+
+- Running inference
+
+  ```c++
+  model.RunInference();
+  ```
+
+- Reading inference results: data and data size from the output
+  tensor. We assume that the output layer has a uint8 data type.
+
+  ```c++
+  const uint32_t tensorSz = outputTensor->bytes;
+
+  const uint8_t* outputData = tflite::GetTensorData<uint8_t>(outputTensor);
+  ```
+
+Adding profiling for Ethos-U55 is straightforward. Include the `Profiler.hpp` header
+and invoke `StartProfiling` and `StopProfiling` around the inference
+execution.
+
+```c++
+Profiler profiler{&platform, "Inference"};
+
+profiler.StartProfiling();
+model.RunInference();
+profiler.StopProfiling();
+std::string profileResults = profiler.GetResultsAndReset();
+
+info("%s\n", profileResults.c_str());
+```
+
+## Printing to console
+
+The examples provided above already use some functions to print messages to the
+console. The full list of available functions is:
+
+- `printf`
+- `trace` - printf wrapper for tracing messages
+- `debug` - printf wrapper for debug messages
+- `info` - printf wrapper for informational messages
+- `warn` - printf wrapper for warning messages
+- `printf_err` - printf wrapper for error messages
+
+The `printf` wrappers can be switched off with the `LOG_LEVEL` define:
+
+trace (0) < debug (1) < info (2) < warn (3) < error (4).
+
+The default output level is info = level 2.
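+
+A minimal sketch of the wrappers in use (the messages are illustrative):
+
+```c++
+trace("Entering main loop\n");        /* Shown only when LOG_LEVEL is 0 (trace). */
+debug("Tensors allocated\n");         /* Shown when LOG_LEVEL is 0 or 1.         */
+info("Model loaded successfully\n");  /* Shown at the default level (info).      */
+warn("Input clip is shorter than expected\n");
+printf_err("Failed to run inference\n");
+```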
+
+## Reading user input from console
+
+The platform data acquisition module has a get_input function to read keyboard
+input from the UART. It can be used as follows:
+
+```c++
+char ch_input[128];
+platform.data_acq->get_input(ch_input, sizeof(ch_input));
+```
+
+The function will block until the user provides an input.
+
+## Output to MPS3 LCD
+
+The platform data presentation module has functions to print text or an image on
+the board's LCD:
+
+- `present_data_text`
+- `present_data_image`
+
+The text presentation function takes the following parameters:
+
+- `const char* str`: string to print.
+- `const uint32_t str_sz`: string size.
+- `const uint32_t pos_x`: x coordinate of the first letter in pixels.
+- `const uint32_t pos_y`: y coordinate of the first letter in pixels.
+- `const uint32_t allow_multiple_lines`: signals whether the text is
+    allowed to span multiple lines on the screen, or should be truncated
+    to the current line.
+
+This function does not wrap text; if the given string cannot fit on the
+screen, it will go outside the screen boundary.
+
+Example that prints "Hello world" on the LCD:
+
+```c++
+std::string hello("Hello world");
+platform.data_psn->present_data_text(hello.c_str(), hello.size(), 10, 35, 0);
+```
+
+The image presentation function takes the following parameters:
+
+- `uint8_t* data`: image data pointer.
+- `const uint32_t width`: image width.
+- `const uint32_t height`: image height.
+- `const uint32_t channels`: number of channels. Only 1 and 3 channels are currently supported.
+- `const uint32_t pos_x`: x coordinate of the first pixel.
+- `const uint32_t pos_y`: y coordinate of the first pixel.
+- `const uint32_t downsample_factor`: the factor by which the image is to be downsampled.
+
+For example, the following code snippet visualizes input tensor data
+for MobileNet v2 224 (downsampling it by a factor of 2):
+
+```c++
+platform.data_psn->present_data_image((uint8_t *) inputTensor->data.data, 224, 224, 3, 10, 35, 2);
+```
+
+Please see the [HAL API](#hal-api) section for other data presentation
+functions.
+
+## Building custom use case
+
+There is one last thing to do before building and running a use-case
+application: create a `usecase.cmake` file in the root of your use-case;
+the name of the file is not important.
+
+> **Convention:**  The build system searches for CMake file in each use-case directory and includes it into the build
+> flow. This file could be used to specify additional application specific build options, add custom build steps or
+> override standard compilation and linking flags.
+> Use `USER_OPTION` function to add additional build option. Prefix variable name with `${use_case}` (use-case name) to
+> avoid names collisions with other CMake variables.
+> Some useful variable names visible in use-case CMake file:
+>
+> - `DEFAULT_MODEL_PATH` – default model path to use if use-case specific `${use_case}_MODEL_TFLITE_PATH` is not set
+>in the build arguments.
+>- `TARGET_NAME` – name of the executable.
+> - `use_case` – name of the current use-case.
+> - `UC_SRC` – list of use-case sources.
+> - `UC_INCLUDE` – path to the use-case headers.
+> - `ETHOS_U55_ENABLED` – flag indicating if the current build supports Ethos-U55.
+> - `TARGET_PLATFORM` – Target platform being built for.
+> - `TARGET_SUBSYSTEM` – If target platform supports multiple subsystems, this is the name of the subsystem.
+> - All standard build options.
+>   - `CMAKE_CXX_FLAGS` and `CMAKE_C_FLAGS` – compilation flags.
+>   - `CMAKE_EXE_LINKER_FLAGS` – linker flags.
+
+For the hello world use-case, it is enough to create a
+`helloworld.cmake` file and set `DEFAULT_MODEL_PATH`:
+
+```cmake
+if (ETHOS_U55_ENABLED EQUAL 1)
+  set(DEFAULT_MODEL_PATH  ${DEFAULT_MODEL_DIR}/helloworldmodel_uint8_vela.tflite)
+else()
+  set(DEFAULT_MODEL_PATH  ${DEFAULT_MODEL_DIR}/helloworldmodel_uint8.tflite)
+endif()
+```
+
+This can then be used further down in the file, for example:
+
+```cmake
+USER_OPTION(${use_case}_MODEL_TFLITE_PATH "Neural network model in tflite format."
+    ${DEFAULT_MODEL_PATH}
+    FILEPATH
+    )
+
+# Generate model file
+generate_tflite_code(
+    MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+    DESTINATION ${SRC_GEN_DIR}
+    )
+```
+
+This ensures that the model pointed to by `${use_case}_MODEL_TFLITE_PATH` is converted to a C++ array and is picked
+up by the build system. More information on auto-generation is available under the section
+[Automatic file generation](./building.md#Automatic-file-generation).
+
+To build your application, follow the general instructions from
+[Add Custom inputs](#add-custom-inputs) and specify the name of the use-case in the
+build command:
+
+```commandline
+cmake \
+  -DTARGET_PLATFORM=mps3 \
+  -DTARGET_SUBSYSTEM=sse-300 \
+  -DUSE_CASE_BUILD=hello_world \
+  -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+As a result, `ethos-u-hello_world.axf` should be created. The MPS3 build
+will also produce a `sectors/hello_world` directory with binaries and an
+`images-hello_world.txt` file to be copied to the board's MicroSD card.
+
+Next section of the documentation: [Testing and benchmarking](../documentation.md#Testing-and-benchmarking).
diff --git a/docs/sections/deployment.md b/docs/sections/deployment.md
new file mode 100644
index 0000000..354d30b
--- /dev/null
+++ b/docs/sections/deployment.md
@@ -0,0 +1,281 @@
+# Deployment
+
+- [Fixed Virtual Platform](#fixed-virtual-platform)
+  - [Setting up the MPS3 Arm Corstone-300 FVP](#setting-up-the-mps3-arm-corstone-300-fvp)
+  - [Deploying on an FVP emulating MPS3](#deploying-on-an-fvp-emulating-mps3)
+- [MPS3 board](#mps3-board)
+  - [Deployment on MPS3 board](#deployment-on-mps3-board)
+
+The sample application for Arm® Ethos™-U55 can be deployed on two
+target platforms, both of which implement the Arm® Corstone™-300 design (see
+<https://www.arm.com/products/iot/soc/corstone-300>):
+
+- A physical Arm MPS3 FPGA prototyping board
+
+- An MPS3 FVP
+
+## Fixed Virtual Platform
+
+The FVP is available publicly from [Arm Ecosystem FVP downloads
+](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+Download the correct archive from the list under `Arm Corstone-300`. We need the one which:
+
+- Emulates MPS3 board (not for MPS2 FPGA board)
+- Contains support for Arm® Ethos™-U55
+
+> **Note:** Currently, the FVP only has a Linux OS version. Also, there are no FVPs available for `SSE-200`
+> which satisfy the above conditions.
+
+For the FVP, the elf or axf file can be run using the Fast Model
+executable, as outlined under [Starting Fast Model simulation](./setup.md/#starting-fast-model-simulation),
+except that the binary pointed at here is the one just built using the
+steps in the previous section.
+
+### Setting up the MPS3 Arm Corstone-300 FVP
+
+For the Ethos-U55 sample application, please download the MPS3 version of the
+Arm® Corstone™-300 model that contains Ethos-U55 and Arm® Cortex®-M55. The model is
+currently only supported on Linux based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+    `./FVP_Corstone_SSE-300_Ethos-U55.sh`
+
+- Follow the instructions to install the FVP to your desired location
+
+### Deploying on an FVP emulating MPS3
+
+This section assumes that the FVP has been installed (see [Setting up the MPS3 Arm Corstone-300 FVP](#Setting-up-the-MPS3-Arm-Corstone-300-FVP)) to the user's home directory `~/FVP_Corstone_SSE-300_Ethos-U55`.
+
+The installation will typically place the executable under the `~/FVP_Corstone_SSE-300_Ethos-U55/models/<OS>_<compiler-version>/`
+directory. For the example below, we assume it to be `~/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4`.
+
+To run a use case on the FVP, from the [Build directory](../sections/building.md#Create-a-build-directory):
+
+```commandline
+~/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 -a ./bin/ethos-u-<use_case>.axf
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+
+    Ethos-U rev 0 --- Oct 13 2020 11:27:45
+    (C) COPYRIGHT 2019-2020 Arm Limited
+    ALL RIGHTS RESERVED
+```
+
+This will also launch a telnet window with the sample application's standard output and error log entries containing
+information about the pre-built application version, TensorFlow Lite Micro library version used, data type as well as
+the input and output tensor sizes of the model compiled into the executable binary.
+
+After the application has started, it outputs a menu and waits for user input from the telnet terminal.
+
+For example, the image classification use case can be started by:
+
+```commandline
+~/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 -a ./bin/ethos-u-img_class.axf
+```
+
+The FVP supports many command line parameters:
+
+- passed by using `-C <param>=<value>`. The most important ones are:
+  - `ethosu.num_macs`: Sets the Ethos-U55 configuration for the model. Valid values are `32`, `64`, `256`,
+    and the default `128`. The number signifies the number of 8x8 MACs performed per cycle by the hardware.
+  - `cpu0.CFGITCMSZ`: ITCM size for the Cortex-M CPU. The ITCM size is pow(2, CFGITCMSZ - 1) KB; for example,
+    a value of 7 gives a 64 KB ITCM.
+  - `cpu0.CFGDTCMSZ`: DTCM size for the Cortex-M CPU. The DTCM size is pow(2, CFGDTCMSZ - 1) KB.
+  - `mps3_board.telnetterminal0.start_telnet`: Starts the telnet session if nothing is connected.
+  - `mps3_board.uart0.out_file`: Sets the output file to hold data written by the UART
+    (use '-' to send all output to stdout; empty by default).
+  - `mps3_board.uart0.shutdown_on_eot`: Shuts down the simulation when an EOT (ASCII 4) character is transmitted.
+  - `mps3_board.visualisation.disable-visualisation`: Enables or disables visualisation (disabled by default).
+
+  To start the model in `128` mode for Ethos-U55:
+
+    ```commandline
+    ~/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 -a ./bin/ethos-u-img_class.axf -C ethosu.num_macs=128
+    ```
+
+- `-l`: shows the full list of supported parameters
+
+    ```commandline
+    ~/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 -l
+    ```
+
+- `--stat`: prints some run statistics on simulation exit
+
+    ```commandline
+    ~/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 --stat
+    ```
+
+- `--timelimit`: sets the number of wall clock seconds for the simulator to run, excluding startup and shutdown.
+
+## MPS3 board
+
+> **Note:**  Before proceeding, make sure you have the MPS3 board powered on,
+and a USB A to B cable connected between your machine and the MPS3.
+The connector on the MPS3 is marked as "Debug USB".
+
+![MPS3](../media/mps3.png)
+
+1. MPS3 board top view.
+
+Once the board has booted, the micro SD card will enumerate as a mass
+storage device. On most systems this will be automatically mounted, but
+you might need to mount it manually.
+
+Also, there should be four serial-over-USB ports available for use via
+this connection. On Linux based machines, these would typically be
+*/dev/ttyUSB\<n\>* to */dev/ttyUSB\<n+3\>*.
+
+The default configuration for all of them is 115200, 8/N/1 (115200 baud,
+8 bits, no parity and 1 stop bit) with no flow control.
+
+> **Note:** For Windows machines, additional FTDI drivers might need to be installed
+for these serial ports to be available.
+For more information on getting started with an MPS3 board, please refer to
+<https://developer.arm.com/-/media/Arm%20Developer%20Community/PDF/MPS3GettingStarted.pdf>
+
+### Deployment on MPS3 board
+
+> **NOTE**: These instructions are valid only if the evaluation is being
+ done using the MPS3 FPGA platform using either `SSE-200` or `SSE-300`.
+
+To run the application on the MPS3 platform, first make sure that
+the platform has been set up using the correct configuration.
+For details on platform set-up, please see the relevant documentation. For `Arm Corstone-300`, this is available
+[here](https://developer.arm.com/-/media/Arm%20Developer%20Community/PDF/DAI0547B_SSE300_PLUS_U55_FPGA_for_mps3.pdf?revision=d088d931-03c7-40e4-9045-31ed8c54a26f&la=en&hash=F0C7837C8ACEBC3A0CF02D871B3A6FF93E09C6B8).
+
+For the MPS3 board, instead of loading the axf file directly, the executable blobs
+generated under the *sectors/<use_case>* subdirectory need to be
+copied over to the MPS3 board's micro SD card. Also, every use case build
+generates a corresponding images.txt file which is used by the MPS3 to
+understand which memory regions the blobs are to be loaded into.
+
+Once the USB A <--> B cable between the MPS3 and the development machine
+is connected and the MPS3 board powered on, the board should enumerate
+as a mass storage device over this USB connection.
+There might also be two devices, depending on the version of the board
+you are using. The device named `V2M-MPS3` or `V2MMPS3` is the SD card.
+
+If the axf/elf file is under 1 MiB, it can be flashed into the FPGA
+memory directly, without having to break it down into separate load
+region specific blobs. However, with neural network models exceeding
+this size, it becomes necessary to follow this approach.
+
+1. For example, the image classification use case will produce:
+
+    ```tree
+    ./bin/sectors/
+        └── img_class
+            ├── dram.bin
+            └── itcm.bin
+    ```
+
+    For example, if the micro SD card is mounted at
+    /media/user/V2M-MPS3/:
+
+    ```commandline
+    cp -av ./bin/sectors/img_class/* /media/user/V2M-MPS3/SOFTWARE/
+    ```
+
+2. The generated `\<use-case\>_images.txt` file needs to be copied
+over to the MPS3. The exact location for the destination will depend
+on the MPS3 board's version and the application note for the bit
+file in use.
+For example, for MPS3 board hardware revision C, using an
+application note directory named "ETHOSU", to replace the images.txt
+file:
+
+    ```commandline
+    cp ./bin/images-img_class.txt /media/user/V2M-MPS3/MB/HBI0309C/ETHOSU/images.txt
+    ```
+
+3. Open the first serial port available from the MPS3, for example,
+"/dev/ttyUSB0". This can typically be done using the minicom, screen or
+PuTTY applications. Make sure the flow control setting is switched
+off.
+
+    ```commandline
+    minicom -D /dev/ttyUSB0
+    ```
+
+    ```log
+    Welcome to minicom 2.7.1
+    OPTIONS: I18n
+    Compiled on Aug 13 2017, 15:25:34.
+    Port /dev/ttyUSB0, 16:05:34
+    Press CTRL-A Z for help on special keys
+    Cmd>
+    ```
+
+4. In another terminal, open the second serial port, for example,
+    "/dev/ttyUSB1":
+
+    ```commandline
+    minicom -D /dev/ttyUSB1
+    ```
+
+5. On the first serial port, issue a "reboot" command and press the
+    return key
+
+    ```commandline
+    $ Cmd> reboot
+    ```
+
+    ```log
+    Rebooting...Disabling debug USB..Board rebooting...
+
+    ARM V2M-MPS3 Firmware v1.3.2
+    Build Date: Apr 20 2018
+
+    Powering up system...
+    Switching on main power...
+    Configuring motherboard (rev C, var A)...
+    ```
+
+    This will go on to reboot the board and prime the application to run by
+    flashing the binaries into their respective FPGA memory locations. For example:
+
+    ```log
+    Reading images file \MB\HBI0309C\ETHOSU\images.txt
+    Writing File \SOFTWARE\itcm.bin to Address 0x00000000
+
+    ............
+
+    File \SOFTWARE\itcm.bin written to memory address 0x00000000
+    Image loaded from \SOFTWARE\itcm.bin
+    Writing File \SOFTWARE\dram.bin to Address 0x08000000
+
+    ..........................................................................
+
+
+    File \SOFTWARE\dram.bin written to memory address 0x08000000
+    Image loaded from \SOFTWARE\dram.bin
+    ```
+
+6. When the reboot from the previous step is completed, issue a reset
+    command on the command prompt.
+
+    ``` commandline
+    $ Cmd> reset
+    ```
+
+    This will trigger the application to start, and the output should be visible on the second serial connection.
+
+7. On the second serial port, output similar to section 2.2 should be visible:
+
+    ```log
+    [INFO] Setting up system tick IRQ (for NPU)
+    [INFO] V2M-MPS3 revision C
+    [INFO] Application Note AN540, Revision B
+    [INFO] FPGA build 1
+    [INFO] Core clock has been set to: 32000000 Hz
+    [INFO] CPU ID: 0x410fd220
+    [INFO] CPU: Cortex-M55 r0p0
+    ...
+    ```
+
+Next section of the main documentation: [Running code samples applications](../documentation.md#Running-code-samples-applications).
diff --git a/docs/sections/run.md b/docs/sections/run.md
new file mode 100644
index 0000000..90ee7c8
--- /dev/null
+++ b/docs/sections/run.md
@@ -0,0 +1,42 @@
+
+# Running Ethos-U55 Code Samples
+
+- [Starting Fast Model simulation](#starting-fast-model-simulation)
+
+This section covers the process for getting started with pre-built binaries for the Code Samples.
+
+## Starting Fast Model simulation
+
+Once the application binaries are built, and assuming the FVP install location
+was set to ~/FVP_install_location, the simulation can be started by:
+
+```commandline
+FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 -a ./bin/mps3-sse-300/ethos-u-<use_case>.axf
+```
+
+This will start the Fast Model simulation for the chosen use-case.
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's
+standard output and error log entries containing information about the
+pre-built application version, TensorFlow Lite Micro library version
+used, data type as well as the input and output tensor sizes of the
+model compiled into the executable binary.
+
+![FVP](../media/fvp.png)
+
+![FVP Terminal](../media/fvpterminal.png)
+
+> **Note:**
+For details on the specific use-case, follow the instructions in the corresponding documentation.
+
+Next section of the documentation: [Implementing custom ML application](../documentation.md#Implementing-custom-ML-application).
diff --git a/docs/sections/testing_benchmarking.md b/docs/sections/testing_benchmarking.md
new file mode 100644
index 0000000..43bb7f4
--- /dev/null
+++ b/docs/sections/testing_benchmarking.md
@@ -0,0 +1,87 @@
+# Testing and benchmarking
+
+- [Testing](#testing)
+- [Benchmarking](#benchmarking)
+
+## Testing
+
+The `tests` folder has the following structure:
+
+```tree
+.
+├── common
+│   └── ...
+├── use_case
+│   ├── <usecase1>
+│   │   └── ...
+│   ├── <usecase2>
+│   │   └── ...
+└── utils
+    └── ...
+```
+
+Where:
+
+- `common`: contains tests for generic and common application functions.
+- `use_case`: contains all the use case specific tests in the respective folders.
+- `utils`: contains utilities sources used only within the tests.
+
+When [configuring](./building.md#configuring-the-build-native-unit-test) and
+[building](./building.md#Building-the-configured-project) for the `native` target platform, the results of the build
+will be placed under the `build/bin/` folder, for example:
+
+```tree
+.
+├── dev_ethosu_eval-<usecase1>-tests
+├── dev_ethosu_eval-<usecase2>-tests
+├── ethos-u-<usecase1>
+└── ethos-u-<usecase2>
+```
+
+To execute unit-tests for a specific use-case in addition to the common tests:
+
+```commandline
+dev_ethosu_eval-<use_case>-tests
+```
+
+```log
+[INFO] native platform initialised
+[INFO] ARM Ethos-U55 Evaluation application for MPS3 FPGA Prototyping Board and FastModel
+
+...
+===============================================================================
+   All tests passed (37 assertions in 7 test cases)
+```
+
+The test output may contain `[ERROR]` messages; this is fine - they come from negative scenario tests.
+
+## Benchmarking
+
+Profiling is enabled by default when configuring the project. This enables displaying:
+
+- the active and idle NPU cycle counts when Arm® Ethos™-U55 is enabled (see `-DETHOS_U55_ENABLED` in
+  [Build options](./building.md#build-options)).
+- CPU cycle counts and/or time elapsed in milliseconds for inferences performed if CPU profiling is enabled
+  (see `-DCPU_PROFILE_ENABLED` in [Build options](./building.md#build-options)). This should be done only
+  when running on a physical FPGA board as the FVP does not contain a cycle-approximate or cycle-accurate Cortex-M model.
+
+For example:
+
+- On the FVP:
+
+```log
+    Active NPU cycles: 5475412
+    Idle NPU cycles:   702
+```
+
+- For MPS3 platform, the time duration in milliseconds is also reported when `-DCPU_PROFILE_ENABLED=1` is added to
+  CMake configuration command:
+
+```log
+    Active NPU cycles: 5629033
+    Idle NPU cycles:   1005276
+    Active CPU cycles: 993553 (approx)
+    Time in ms:        210
+```
+
+Next section of the main documentation: [Troubleshooting](../documentation.md#Troubleshooting).
diff --git a/docs/sections/troubleshooting.md b/docs/sections/troubleshooting.md
new file mode 100644
index 0000000..40b975a
--- /dev/null
+++ b/docs/sections/troubleshooting.md
@@ -0,0 +1,27 @@
+# Troubleshooting
+
+- [Inference results are incorrect for my custom files](#inference-results-are-incorrect-for-my-custom-files)
+- [The application does not work with my custom model](#the-application-does-not-work-with-my-custom-model)
+
+## Inference results are incorrect for my custom files
+
+Ensure that the files you are using match the requirements of the model
+you are using and that the cmake parameters are set accordingly. More
+information on these cmake parameters is detailed in their separate
+sections. Note that preprocessing of the files could also affect the
+inference result, such as the rescaling and padding operations done for
+image classification.
+
+## The application does not work with my custom model
+
+Ensure that your model is in a fully quantized `.tflite` file format,
+either uint8 or int8, and has successfully been run through the Vela
+compiler.
+
+Check that the cmake parameters match your new model's input requirements.
+
+> **Note:** The Vela tool is not available within this software project.
+It is a Python tool available from <https://pypi.org/project/ethos-u-vela/>.
+The source code is hosted on <https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/>.
+
+Next section of the documentation: [Contribution guidelines](../documentation.md#Contribution-guidelines).
diff --git a/docs/use_cases/ad.md b/docs/use_cases/ad.md
new file mode 100644
index 0000000..ca95af8
--- /dev/null
+++ b/docs/use_cases/ad.md
@@ -0,0 +1,523 @@
+# Anomaly Detection Code Sample
+
+  - [Introduction](#introduction)
+    - [Prerequisites](#prerequisites)
+  - [Building the code sample application from sources](#building-the-code-sample-application-from-sources)
+    - [Build options](#build-options)
+    - [Build process](#build-process)
+    - [Add custom input](#add-custom-input)
+    - [Add custom model](#add-custom-model)
+  - [Setting-up and running Ethos-U55 Code Sample](#setting-up-and-running-ethos-u55-code-sample)
+    - [Setting up the Ethos-U55 Fast Model](#setting-up-the-ethos-u55-fast-model)
+    - [Starting Fast Model simulation](#starting-fast-model-simulation)
+    - [Running Anomaly Detection](#running-anomaly-detection)
+  - [Anomaly Detection processing information](#anomaly-detection-processing-information)
+    - [Preprocessing and feature extraction](#preprocessing-and-feature-extraction)
+    - [Postprocessing](#postprocessing)
+
+## Introduction
+
+This document describes the process of setting up and running the Arm® Ethos™-U55 Anomaly Detection example.
+
+Use case code can be found in the [source/use_case/ad](../../source/use_case/ad) directory.
+
+### Preprocessing and feature extraction
+
+The Anomaly Detection model that is used with the Code Samples expects audio data to be preprocessed
+in a specific way before performing an inference. This section aims to provide an overview of the feature extraction
+process used.
+
+First the audio data is normalized to the range (-1, 1).
+
+Next, a window of 1024 audio samples is taken from the start of the audio clip. From these 1024 samples, we calculate 64
+Log Mel Energies that form part of a Log Mel Spectrogram.
+
+The window is shifted by 512 audio samples and another 64 Log Mel Energies are calculated. This is repeated until we
+have 64 sets of Log Mel Energies.
+
+This 64x64 matrix of values is then resized by a factor of 2, resulting in a 32x32 matrix of values.
+
+The average of the training dataset is subtracted from this 32x32 matrix and an inference can then be performed.
+
+We then start this process again, shifting the start by 20 * 512 = 10240 audio samples. This repeats until enough
+inferences have been performed to cover the whole audio clip, as sketched below.
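+
+A minimal sketch of the windowing arithmetic described above (the names and
+the clip length are illustrative, not taken from the code sample sources):
+
+```c++
+#include <cstdio>
+
+int main() {
+    constexpr int frameLen     = 1024;              /* Samples per Log Mel frame.        */
+    constexpr int frameStride  = 512;               /* Shift between frames.             */
+    constexpr int numFrames    = 64;                /* Frames per 64x64 spectrogram.     */
+    constexpr int inferenceHop = 20 * frameStride;  /* 10240 samples between inferences. */
+    constexpr int clipLen      = 160000;            /* 10 s at 16 kHz; illustrative.     */
+
+    /* Samples consumed by one full 64-frame spectrogram. */
+    constexpr int windowSpan = (numFrames - 1) * frameStride + frameLen;
+
+    for (int start = 0; start + windowSpan <= clipLen; start += inferenceHop) {
+        /* Each iteration yields one 64x64 Log Mel matrix, resized to 32x32,
+         * mean-subtracted and fed to one inference. */
+        printf("Inference over samples [%d, %d)\n", start, start + windowSpan);
+    }
+    return 0;
+}
+```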
+
+### Postprocessing
+
+Softmax is applied to the result of each inference. Based on the machine ID of the wav clip being processed, we look at a
+specific index in each output vector. An average of the negative value at this index across all the inferences performed
+for the audio clip is taken. If this average value is greater than a chosen threshold score, then the machine in the
+clip is not behaving anomalously. If the score is lower than the threshold, then the machine in the clip is behaving
+anomalously. A sketch of this scoring is shown below.
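+
+A minimal sketch of this scoring logic (the names are illustrative; see the
+use-case sources for the actual implementation):
+
+```c++
+#include <vector>
+
+/* Averages the negated softmax output at the machine ID index across all
+ * inferences performed for a clip. */
+static float ClipScore(const std::vector<std::vector<float>>& softmaxOutputs,
+                       size_t machineIdIdx) {
+    float sum = 0.0f;
+    for (const auto& output : softmaxOutputs) {
+        sum += -output[machineIdIdx];
+    }
+    return sum / static_cast<float>(softmaxOutputs.size());
+}
+
+/* score >  threshold : machine behaving normally
+ * score <= threshold : machine behaving anomalously */
+```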
+
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the code sample application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the Anomaly Detection use case adds:
+
+- `ad_MODEL_TFLITE_PATH` - Path to the NN model file in TFLite format. The model will be processed and included into
+    the application axf file. The default value points to one of the delivered set of models. Note that the parameters
+    `ad_LABELS_TXT_FILE`, `TARGET_PLATFORM` and `ETHOS_U55_ENABLED` should be aligned with the chosen model, i.e.:
+  - if `ETHOS_U55_ENABLED` is set to `On` or `1`, the NN model is assumed to be optimized. The model will naturally fall
+back to the Arm® Cortex®-M CPU if an unoptimized model is supplied.
+  - if `ETHOS_U55_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized
+model in this case will result in a runtime error.
+
+- `ad_FILE_PATH`: Path to the directory containing audio files, or a path to a single WAV file, to be used in the
+    application. The default value points to the resources/ad/samples folder containing the delivered set of audio clips.
+
+- `ad_AUDIO_RATE`: Input data sampling rate. Each audio file from ad_FILE_PATH is preprocessed during the build to match
+NN model input requirements.
+    Default value is 16000.
+
+- `ad_AUDIO_MONO`: If set to ON the audio data will be converted to mono. Default is ON.
+
+- `ad_AUDIO_OFFSET`: Start loading audio data starting from this offset (in seconds). Default value is 0.
+
+- `ad_AUDIO_DURATION`: Length of the audio data to be used in the application in seconds. Default is 0 meaning the
+    whole audio file will be taken.
+
+- `ad_AUDIO_MIN_SAMPLES`: Minimum number of samples required by the network model. If the audio clip is shorter than
+    this number, it is padded with zeros. Default value is 16000.
+
+- `ad_MODEL_SCORE_THRESHOLD`: Threshold value to be applied to the average softmax score over the clip; if the score
+    is larger than this threshold, we have an anomaly.
+
+- `ad_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By default, it is set to
+    2MiB and should be enough for most models.
+
+To build **only** the Anomaly Detection example application, add `-DUSE_CASE_BUILD=ad` to the `cmake` command line specified in [Building](../documentation.md#Building).
+
+### Build process
+
+> **Note:** This section describes the process for configuring the build for `MPS3: SSE-300`. For different target
+>platforms, see [Building](../documentation.md#Building).
+
+Create a build directory and navigate into it:
+
+```commandline
+mkdir build_ad && cd build_ad
+```
+
+On Linux, execute the following command to build **only** the Anomaly Detection application to run on the Ethos-U55 Fast Model when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+Toolchain option `CMAKE_TOOLCHAIN_FILE` points to the toolchain specific file to set the compiler and platform specific
+parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Modeldebugger for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+> **Note:** If building for different Ethos-U55 configurations, see
+[Configuring build for different Arm Ethos-U55 configurations](../sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations):
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver is not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` can be used to configure the location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+Also, the `CMSIS_SRC_PATH` parameter can be used to override the CMSIS sources used by TensorFlow by
+default for compilation. For example, to use the CMSIS sources fetched by the ethos-u helper script, we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=../ethos-u/core_software/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=../ethos-u/core_software/core_driver \
+    -DCMSIS_SRC_PATH=../ethos-u/core_software/cmsis \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run the CMake command.
+
+If the CMake command succeeded, build the application as follows:
+
+```commandline
+make -j4
+```
+
+For Windows, use `mingw32-make`.
+
+Add `VERBOSE=1` to see compilation and link details.
+
+Results of the build will be placed under `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-ad.axf
+ ├── ethos-u-ad.htm
+ ├── ethos-u-ad.map
+ ├── images-ad.txt
+ └── sectors
+      └── ad
+          ├── dram.bin
+          └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-ad.axf`: The built application binary for the Anomaly Detection use case.
+
+- `ethos-u-ad.map`: Information from building the application (e.g. libraries used, what was optimized, location of
+    objects)
+
+- `ethos-u-ad.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `images-ad.txt`: Tells the FPGA which memory regions to use for loading the binaries in the sectors/\*\* folder.
+
+### Add custom input
+
+The application performs anomaly detection on audio data found in the folder, or an individual file, set by the CMake
+parameter `ad_FILE_PATH`.
+
+To run the application with your own audio clips first create a folder to hold them and then copy the custom clips into
+this folder:
+
+```commandline
+mkdir /tmp/custom_files
+
+cp custom_id_00.wav /tmp/custom_files/
+```
+
+> **Note:** The data used for this example comes from
+[https://zenodo.org/record/3384388#.X6GILFNKiqA](https://zenodo.org/record/3384388#.X6GILFNKiqA)
+and the model included in this example is trained on the ‘Slider’ part of the dataset.
+The machine ID (00, 02, 04, 06) the clip comes from must be in the file name for the application to work.
+The file name should match a pattern such as
+`<any>_<text>_00_<here>.wav` if the audio was from machine ID 00,
+or `<any>_<text>_02_<here>.wav` if it was from machine ID 02, etc.
+>
+> **Note:** Clean the build directory before re-running the CMake command.
+
+Next set `ad_FILE_PATH` to the location of this folder when building:
+
+```commandline
+cmake \
+    -Dad_FILE_PATH=/tmp/custom_files/ \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+The audio files found in the `ad_FILE_PATH` folder will be picked up and automatically converted to C++ files during
+the CMake configuration stage and then compiled into the application during the build phase for performing inference
+with.
+
+The log from the configuration stage should tell you what audio clip directory path has been used:
+
+```log
+-- User option ad_FILE_PATH is set to /tmp/custom_files
+```
+
+After compiling, your custom inputs will have now replaced the default ones in the application.
+
+### Add custom model
+
+The application performs inference using the model pointed to by the CMake parameter ``ad_MODEL_TFLITE_PATH``.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom model has been run through the Vela compiler
+>successfully before continuing. See [Optimize model with Vela compiler](../sections/building.md#Optimize-custom-model-with-Vela-compiler).
+
+An example:
+
+```commandline
+cmake \
+    -Dad_MODEL_TFLITE_PATH=<path/to/custom_ad_model_after_vela.tflite> \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=ad ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The `.tflite` model file pointed to by `ad_MODEL_TFLITE_PATH` will be converted
+to C++ files during the CMake configuration
+stage and then compiled into the application for performing inference with.
+
+The log from the configuration stage should tell you what model path has been used:
+
+```log
+-- User option TARGET_PLATFORM is set to mps3
+-- User option ad_MODEL_TFLITE_PATH is set to <path/to/custom_ad_model_after_vela.tflite>
+...
+-- Using <path/to/custom_ad_model_after_vela.tflite>
+++ Converting custom_ad_model_after_vela.tflite to custom_ad_model_after_vela.tflite.cc
+...
+```
+
+After compiling, your custom model will have now replaced the default one in the application.
+
+> **Note:** In order to successfully run the model, the NPU must be enabled, `TARGET_PLATFORM` must be set to `mps3`,
+>and `TARGET_SUBSYSTEM` must be SSE-200 or SSE-300.
+
+## Setting-up and running Ethos-U55 Code Sample
+
+### Setting up the Ethos-U55 Fast Model
+
+The FVP is available publicly from [Arm Ecosystem FVP downloads
+](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For Ethos-U55 evaluation, please download the MPS3 version of the Arm® Corstone™-300 model that contains Ethos-U55 and
+Cortex-M55. The model is currently only supported on Linux based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+./FVP_Corstone_SSE-300_Ethos-U55.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+> **Note:** The anomaly detection example does not come pre-built. You will first need to follow the instructions in
+>section 3 for building the application from source.
+
+After building, and assuming the install location of the FVP was set to ~/FVP_install_location, the simulation can be
+started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 ./bin/ethos-u-ad.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error log entries containing
+information about the pre-built application version, TensorFlow Lite Micro library version used, data type as well as
+the input and output tensor sizes of the model compiled into the executable binary.
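+
+If a telnet session is not launched automatically, you can connect to the first serial port manually (assuming the
+simulation is running on the local machine):
+
+```commandline
+telnet localhost 5000
+```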
+
+After the application has started, if `ad_FILE_PATH` points to a single file (or a folder containing a single input
+file) the inference starts immediately. If there are multiple inputs, the application outputs a menu and waits for
+user input from the telnet terminal:
+
+```log
+User input required
+Enter option number from:
+
+1. Classify next audio clip
+2. Classify audio clip at chosen index
+3. Run classification on all audio clips
+4. Show NN model info
+5. List audio clips
+
+Choice:
+
+```
+
+1. “Classify next audio clip” menu option will run a single inference on the next in line audio clip.
+
+2. “Classify audio clip at chosen index” menu option will run inference on the chosen audio clip.
+
+    > **Note:** Please make sure to select an audio clip index within the range of supplied audio clips during the
+    application build. By default, the pre-built application has 4 files, with indexes from 0 to 3.
+
+3. “Run classification on all audio clips” menu option triggers sequential inference executions on all built-in audio clips.
+
+4. “Show NN model info” menu option prints information about model data type, input and output tensor sizes:
+
+    ```log
+    [INFO] uTFL version: 2.5.0
+    [INFO] Model info:
+    [INFO] Model INPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 1024 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:  32
+    [INFO] 		2:  32
+    [INFO] 		3:   1
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.192437
+    [INFO] ZeroPoint[0] = 11
+    [INFO] Model OUTPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 8 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:   8
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.048891
+    [INFO] ZeroPoint[0] = -30
+    [INFO] Activation buffer (a.k.a tensor arena) size used: 198016
+    [INFO] Number of operators: 1
+    [INFO] 	Operator 0: ethos-u
+    [INFO] Use of Arm uNPU is enabled
+
+    ```
+
+5. “List audio clips” menu option prints a list of audio clip indexes paired with the original filenames embedded in the application:
+
+    ```log
+    [INFO] List of Files:
+    [INFO] 0 => anomaly_id_00_00000000.wav
+    [INFO] 1 => anomaly_id_02_00000076.wav
+    [INFO] 2 => normal_id_00_00000004.wav
+    [INFO] 3 => normal_id_02_00000001.wav
+    ```
+
+### Running Anomaly Detection
+
+Please select the first menu option to execute Anomaly Detection.
+
+The following example illustrates application output:
+
+```log
+[INFO] Running inference on audio clip 0 => anomaly_id_00_00000000.wav
+[INFO] Inference 1/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1081154
+	Idle NPU cycles:   1012
+
+[INFO] Inference 2/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080934
+	Idle NPU cycles:   232
+
+[INFO] Inference 3/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1081332
+	Idle NPU cycles:   834
+
+[INFO] Inference 4/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080748
+	Idle NPU cycles:   418
+
+[INFO] Inference 5/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080728
+	Idle NPU cycles:   438
+
+[INFO] Inference 6/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1081144
+	Idle NPU cycles:   1022
+
+[INFO] Inference 7/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080924
+	Idle NPU cycles:   242
+
+[INFO] Inference 8/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1081322
+	Idle NPU cycles:   844
+
+[INFO] Inference 9/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080738
+	Idle NPU cycles:   428
+
+[INFO] Inference 10/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080718
+	Idle NPU cycles:   448
+
+[INFO] Inference 11/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1081134
+	Idle NPU cycles:   1032
+
+[INFO] Inference 12/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1080914
+	Idle NPU cycles:   252
+
+[INFO] Inference 13/13
+[INFO] Profile for Inference:
+	Active NPU cycles: 1081312
+	Idle NPU cycles:   854
+
+[INFO] Average anomaly score is: -0.024493
+Anomaly threshold is: -0.800000
+Anomaly detected!
+
+```
+
+As multiple inferences have to be run for one clip, it will take around a minute for all inferences to complete.
+
+For the anomaly_id_00_00000000.wav clip, after averaging results across all inferences, the score is greater than the
+chosen anomaly threshold, so an anomaly was detected with the machine in this clip.
+
+The profiling section of the log shows the NPU cycle counts for each inference. For the last inference, the profiling reports:
+
+- Ethos-U55's PMU report:
+
+  - 1,081,312 active cycles: number of cycles that were used for computation
+
+  - 854 idle cycles: number of cycles for which the NPU was idle
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+    the CPU model is not cycle-approximate or cycle-accurate.
diff --git a/docs/use_cases/asr.md b/docs/use_cases/asr.md
new file mode 100644
index 0000000..d224aca
--- /dev/null
+++ b/docs/use_cases/asr.md
@@ -0,0 +1,529 @@
+# Automatic Speech Recognition Code Sample
+
+- [Introduction](#introduction)
+  - [Prerequisites](#prerequisites)
+- [Building the code sample application from sources](#building-the-code-sample-application-from-sources)
+  - [Build options](#build-options)
+  - [Build process](#build-process)
+  - [Add custom input](#add-custom-input)
+  - [Add custom model](#add-custom-model)
+- [Setting-up and running Ethos-U55 Code Sample](#setting-up-and-running-ethos-u55-code-sample)
+  - [Setting up the Ethos-U55 Fast Model](#setting-up-the-ethos-u55-fast-model)
+  - [Starting Fast Model simulation](#starting-fast-model-simulation)
+  - [Running Automatic Speech Recognition](#running-automatic-speech-recognition)
+- [Automatic Speech Recognition processing information](#automatic-speech-recognition-processing-information)
+  - [Preprocessing and feature extraction](#preprocessing-and-feature-extraction)
+  - [Postprocessing](#postprocessing)
+
+## Introduction
+
+This document describes the process of setting up and running the Arm® Ethos™-U55 Automatic Speech Recognition example.
+
+The use case code can be found in the [source/use_case/asr](../../source/use_case/asr) directory.
+
+### Preprocessing and feature extraction
+
+The wav2letter automatic speech recognition model that is used with the Code Samples expects audio data to be
+preprocessed in a specific way before performing an inference. This section aims to provide an overview of the feature
+extraction process used.
+
+First the audio data is normalized to the range (-1, 1).
+
+> **Note:** Mel-frequency cepstral coefficients (MFCCs) are a common feature extracted from audio data and can be used as
+>input for machine learning tasks like keyword spotting and speech recognition. See source/application/main/include/Mfcc.hpp
+>for implementation details.
+
+Next, a window of 512 audio samples is taken from the start of the audio clip. From these 512 samples we calculate 13
+MFCC features.
+
+The whole window is shifted to the right by 160 audio samples and 13 new MFCC features are calculated. This process of
+shifting and calculating is repeated until enough audio samples to perform an inference have been processed. In total
+this will be 296 windows that each have 13 MFCC features calculated for them.
+
+After extracting MFCC features the first and second order derivatives of these features with respect to time are
+calculated. These derivative features are then standardized and concatenated with the MFCC features (which also get
+standardized). At this point the input tensor will have a shape of 296x39.
+
+These extracted features are quantized, and an inference is performed.
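+
+The windowing arithmetic above can be summarized in a short sketch (a minimal illustration of the numbers quoted in
+this section, not the project's actual implementation; the MFCC computation itself is omitted):
+
+```python
+import numpy as np
+
+WINDOW_LEN = 512     # audio samples per analysis window
+WINDOW_STRIDE = 160  # samples the window shifts each step
+NUM_WINDOWS = 296    # windows needed for one inference
+NUM_FEATURES = 39    # 13 MFCCs + 13 first + 13 second order derivatives
+
+# Audio samples consumed by a single inference:
+samples_needed = WINDOW_LEN + (NUM_WINDOWS - 1) * WINDOW_STRIDE  # 47712
+
+def frame_audio(audio: np.ndarray) -> np.ndarray:
+    """Split normalized audio into overlapping (296, 512) analysis windows."""
+    starts = range(0, len(audio) - WINDOW_LEN + 1, WINDOW_STRIDE)
+    frames = [audio[s:s + WINDOW_LEN] for s in starts]
+    return np.stack(frames[:NUM_WINDOWS])
+
+# Each window yields 13 MFCCs; after concatenating the standardized
+# derivatives, the final input tensor has shape (296, 39).
+```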
+
+![ASR preprocessing](../media/ASR_preprocessing.png)
+
+For longer audio clips where multiple inferences need to be performed, the initial starting position is offset by
+(100*160) = 16000 audio samples. From this new starting point, MFCC and derivative features are calculated as before
+until there is enough to perform another inference. Padding can be used if there are not enough audio samples for at
+least 1 inference. This step is repeated until the whole audio clip has been processed. If there are not enough audio
+samples for a final complete inference the MFCC features will be padded by repeating the last calculated feature until
+an inference can be performed.
+
+> **Note:** Parameters of the MFCC feature extraction such as window size, stride, number of features etc. all depend on
+>what was used during model training. These values are specific to each model. If you switch to a different ASR model
+>than the one supplied, then the feature extraction process could be completely different to the one currently implemented.
+
+The number of audio samples we offset by for long audio clips is specific to the included wav2letter model.
+
+### Postprocessing
+
+After performing an inference, the raw output needs to be postprocessed to get a usable result.
+
+The raw output from the model is a tensor of shape 148x29, where each row is a probability distribution over the 29
+possible characters that can appear at each of the 148 time steps.
+
+This wav2letter model is trained using context windows, which means that only certain parts of the output are usable,
+depending on the portion of the audio clip that is currently being processed.
+
+If this is the first inference and multiple inferences are required, then ignore the final 49 rows of the output.
+Similarly, if this is the final inference from multiple inferences then ignore the first 49 rows of the output. Finally,
+if this inference is not the last or first inference then ignore the first and last 49 rows of the model output.
+
+> **Note:** If the audio clip is small enough then the whole of the model output is usable and there is no need to throw
+>away any of the output before continuing.
+
+Once any rows have been removed, the final processing can be done. To process the output, first the letter with the
+highest probability at each time step is found. Next, any letters that are repeated multiple times in a row are removed
+(e.g. [t, t, t, o, p, p] becomes [t, o, p]). Finally, the 29th blank token letter is removed from the output.
+
+For the final output, the result from all inferences are combined before decoding. What you are left with is then
+displayed to the console.
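+
+A minimal sketch of this greedy decoding step (assuming a hypothetical index-to-character mapping with the blank token
+as the last class; not the project's implementation):
+
+```python
+import numpy as np
+
+def greedy_decode(output: np.ndarray, alphabet: str, blank_idx: int = 28) -> str:
+    """Decode a (time_steps, 29) score matrix into text."""
+    best = output.argmax(axis=1)              # most likely class per time step
+    deduped = [c for i, c in enumerate(best)  # collapse repeated classes
+               if i == 0 or c != best[i - 1]]
+    return "".join(alphabet[c] for c in deduped if c != blank_idx)
+```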
+
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the code sample application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the Automatic Speech Recognition use
+case adds:
+
+- `asr_MODEL_TFLITE_PATH` - Path to the NN model file in TFLite format. Model will be processed and included into the
+application axf file. The default value points to one of the delivered set of models. Note that the parameters
+`asr_LABELS_TXT_FILE`, `TARGET_PLATFORM` and `ETHOS_U55_ENABLED` should be aligned with the chosen model, i.e.:
+  - if `ETHOS_U55_ENABLED` is set to `On` or `1`, the NN model is assumed to be optimized. The model will naturally
+fall back to the Arm® Cortex®-M CPU if an unoptimized model is supplied.
+  - if `ETHOS_U55_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized
+model in this case will result in a runtime error.
+
+- `asr_FILE_PATH`:  Path to the directory containing audio files, or a path to single WAV file, to be used in the
+    application. The default value points
+    to the resources/asr/samples folder containing the delivered set of audio clips.
+
+- `asr_LABELS_TXT_FILE`: Path to the labels' text file. The file is used to map letter class index to the text label.
+    The default value points to the delivered labels.txt file inside the delivery package.
+
+- `asr_AUDIO_RATE`: Input data sampling rate. Each audio file from asr_FILE_PATH is preprocessed during the build to
+    match NN model input requirements. Default value is 16000.
+
+- `asr_AUDIO_MONO`: If set to ON the audio data will be converted to mono. Default is ON.
+
+- `asr_AUDIO_OFFSET`: Start loading audio data starting from this offset (in seconds). Default value is 0.
+
+- `asr_AUDIO_DURATION`: Length of the audio data to be used in the application in seconds. Default is 0 meaning the
+    whole audio file will be taken.
+
+- `asr_AUDIO_MIN_SAMPLES`: Minimum number of samples required by the network model. If the audio clip is shorter than
+    this number, it is padded with zeros. Default value is 16000.
+
+- `asr_MODEL_SCORE_THRESHOLD`: Threshold value that must be applied to the inference results for a label to be
+    deemed valid. Default is 0.5.
+
+- `asr_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By default, it is set
+    to 2MiB and should be enough for most models.
+
+In order to build **only** the automatic speech recognition example application, add `-DUSE_CASE_BUILD=asr` to the
+`cmake` command line specified in [Building](../documentation.md#Building).
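+
+For example, the audio pre-processing options listed above can be combined with the mandatory arguments as follows
+(illustrative values, run from a build directory as described in the next section):
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -Dasr_AUDIO_RATE=16000 \
+    -Dasr_AUDIO_DURATION=5 \
+    -DUSE_CASE_BUILD=asr ..
+```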
+
+### Build process
+
+> **Note:** This section describes the process for configuring the build for `MPS3: SSE-300`. For a different target
+>platform, see the [Building](../documentation.md#Building) section.
+
+In order to build **only** the automatic speech recognition example, create a build directory and navigate inside:
+
+```commandline
+mkdir build_asr && cd build_asr
+```
+
+On Linux, execute the following command to build **only** the Automatic Speech Recognition application to run on the
+Ethos-U55 Fast Model when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+Toolchain option `CMAKE_TOOLCHAIN_FILE` points to the toolchain specific file to set the compiler and platform specific
+parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Modeldebugger for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+> **Note:** If building for different Ethos-U55 configurations, see
+>[Configuring build for different Arm Ethos-U55 configurations](../sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations):
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver is not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` can be used to configure the location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+Also, the `CMSIS_SRC_PATH` parameter can be used to override the CMSIS sources that TensorFlow uses by default. For
+example, to use the CMSIS sources fetched by the ethos-u helper script, we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=../ethos-u/core_software/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=../ethos-u/core_software/core_driver \
+    -DCMSIS_SRC_PATH=../ethos-u/core_software/cmsis \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run
+>the CMake command.
+
+If the CMake command succeeded, build the application as follows:
+
+```commandline
+make -j4
+```
+
+For Windows, use `mingw32-make`.
+
+Add `VERBOSE=1` to see compilation and link details.
+
+Results of the build will be placed under `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-asr.axf
+ ├── ethos-u-asr.htm
+ ├── ethos-u-asr.map
+ ├── images-asr.txt
+ └── sectors
+      └── asr
+          ├── dram.bin
+          └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-asr.axf`: The built application binary for the Automatic Speech Recognition use case.
+
+- `ethos-u-asr.map`: Information from building the application (e.g. libraries used, what was optimized, location of
+    objects)
+
+- `ethos-u-asr.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `images-asr.txt`: Tells the FPGA which memory regions to use for loading the binaries in the `sectors/asr` folder.
+
+### Add custom input
+
+The application performs inference on audio data found in the folder, or an individual file, set by the CMake parameter
+`asr_FILE_PATH`.
+
+To run the application with your own audio clips first create a folder to hold them and then copy the custom audio clips
+into this folder:
+
+```commandline
+mkdir /tmp/custom_wavs
+
+cp my_clip.wav /tmp/custom_wavs/
+```
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+Next set `asr_FILE_PATH` to the location of this folder when building:
+
+```commandline
+cmake \
+    -Dasr_FILE_PATH=/tmp/custom_wavs/ \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DUSE_CASE_BUILD=asr \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+The audio clips found in the `asr_FILE_PATH` folder will be picked up and automatically converted to C++ files during the
+CMake configuration stage and then compiled into the application during the build phase for performing inference with.
+
+The log from the configuration stage should tell you what audio clip directory path has been used:
+
+```log
+-- User option asr_FILE_PATH is set to /tmp/custom_wavs
+-- Generating audio files from /tmp/custom_wavs
+++ Converting my_clip.wav to my_clip.cc
+++ Generating build/generated/asr/include/InputFiles.hpp
+++ Generating build/generated/asr/src/InputFiles.cc
+-- Defined build user options:
+-- asr_FILE_PATH=/tmp/custom_wavs
+```
+
+After compiling, your custom inputs will have now replaced the default ones in the application.
+
+> **Note:** The CMake parameter `asr_AUDIO_MIN_SAMPLES` determines the minimum number of input samples. When building
+>the application, if an audio clip is shorter than `asr_AUDIO_MIN_SAMPLES`, it will be padded so that it matches.
+
+### Add custom model
+
+The application performs inference using the model pointed to by the CMake parameter `asr_MODEL_TFLITE_PATH`.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom model has been run through the Vela
+>compiler successfully before continuing. See [Optimize model with Vela compiler](../sections/building.md#Optimize-custom-model-with-Vela-compiler).
+
+To run the application with a custom model you will need to provide a `labels_<model_name>.txt` file of labels
+associated with the model. Each line of the file should correspond to one of the outputs in your model. See the provided
+`labels_wav2letter.txt` file for an example.
+
+Then, you must set `asr_MODEL_TFLITE_PATH` to the location of the Vela processed model file and `asr_LABELS_TXT_FILE`
+to the location of the associated labels file.
+
+An example:
+
+```commandline
+cmake \
+    -Dasr_MODEL_TFLITE_PATH=<path/to/custom_model_after_vela.tflite> \
+    -Dasr_LABELS_TXT_FILE=<path/to/labels_custom_model.txt> \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=asr ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The `.tflite` model file pointed to by `asr_MODEL_TFLITE_PATH` and labels text file pointed to by `asr_LABELS_TXT_FILE`
+will be converted to C++ files during the CMake configuration stage and then compiled into the application for performing
+inference with.
+
+The log from the configuration stage should tell you what model path and labels file have been used:
+
+```log
+-- User option TARGET_PLATFORM is set to mps3
+-- User option asr_MODEL_TFLITE_PATH is set to <path/to/custom_model_after_vela.tflite>
+...
+-- User option asr_LABELS_TXT_FILE is set to <path/to/labels_custom_model.txt>
+...
+-- Using <path/to/custom_model_after_vela.tflite>
+++ Converting custom_model_after_vela.tflite to custom_model_after_vela.tflite.cc
+-- Generating labels file from <path/to/labels_custom_model.txt>
+-- writing to <path/to/build/generated/src/Labels.cc>
+...
+```
+
+After compiling, your custom model will have now replaced the default one in the application.
+
+## Setting-up and running Ethos-U55 Code Sample
+
+### Setting up the Ethos-U55 Fast Model
+
+The FVP is available publicly from [Arm Ecosystem FVP downloads
+](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For Ethos-U55 evaluation, please download the MPS3 version of the Arm® Corstone™-300 model that contains Ethos-U55 and
+Cortex-M55. The model is currently only supported on Linux based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+./FVP_Corstone_SSE-300_Ethos-U55.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+Once the build step has completed, the application binary `ethos-u-asr.axf` can be found in the `build/bin` folder.
+Assuming the install location of the FVP was set to ~/FVP_install_location, the simulation can be started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 ./bin/mps3-sse-300/ethos-u-asr.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error log entries containing
+information about the pre-built application version, TensorFlow Lite Micro library version used, data type as well as
+the input and output tensor sizes of the model compiled into the executable binary.
+
+After the application has started, if `asr_FILE_PATH` points to a single file (or a folder containing a single input
+file) the inference starts immediately. If there are multiple inputs, the application outputs a menu and waits for
+user input from the telnet terminal:
+
+```log
+User input required
+Enter option number from:
+
+1. Classify next audio clip
+2. Classify audio clip at chosen index
+3. Run classification on all audio clips
+4. Show NN model info
+5. List audio clips
+
+Choice:
+
+```
+
+1. “Classify next audio clip” menu option will run inference on the next in line voice clip from the collection of
+    compiled audio clips.
+
+    > **Note:** If the clip is over a certain length, the application will invoke multiple inference runs to cover
+    the entire file.
+
+2. “Classify audio clip at chosen index” menu option will run inference on the chosen audio clip.
+
+    > **Note:** Please make sure to select an audio clip index within the range of supplied audio clips during the
+    application build. By default, the pre-built application has 4 files, with indexes from 0 to 3.
+
+3. “Run classification on all audio clips” menu option triggers sequential inference executions on all built-in voice
+    samples.
+
+4. “Show NN model info” menu option prints information about model data type, input and output tensor sizes:
+
+    ```log
+    [INFO] uTFL version: 2.5.0
+    [INFO] Model info:
+    [INFO] Model INPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 11544 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1: 296
+    [INFO] 		2:  39
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.110316
+    [INFO] ZeroPoint[0] = -11
+    [INFO] Model OUTPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 4292 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:   1
+    [INFO] 		2: 148
+    [INFO] 		3:  29
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.003906
+    [INFO] ZeroPoint[0] = -128
+    [INFO] Activation buffer (a.k.a tensor arena) size used: 783168
+    [INFO] Number of operators: 1
+    [INFO] 	Operator 0: ethos-u
+    [INFO] Use of Arm uNPU is enabled
+    ```
+
+5. “List audio clips” menu option prints a list of audio clip indexes paired with the original filenames embedded in the application:
+
+    ```log
+    [INFO] List of Files:
+    [INFO] 0 => anotherdoor.wav
+    [INFO] 1 => anotherengineer.wav
+    [INFO] 2 => itellyou.wav
+    [INFO] 3 => testingroutine.wav
+    ```
+
+### Running Automatic Speech Recognition
+
+Please select the first menu option to execute Automatic Speech Recognition.
+
+The following example illustrates application output:
+
+```log
+[INFO] Running inference on audio clip 0 => anotherdoor.wav
+[INFO] Inference 1/2
+[INFO] Profile for pre-processing:
+	Active NPU cycles: 0
+	Idle NPU cycles:   6
+
+[INFO] Profile for Inference:
+	Active NPU cycles: 28924342
+	Idle NPU cycles:   824
+
+[INFO] Inference 2/2
+[INFO] Profile for pre-processing:
+	Active NPU cycles: 0
+	Idle NPU cycles:   6
+
+[INFO] Profile for Inference:
+	Active NPU cycles: 28924298
+	Idle NPU cycles:   868
+
+[INFO] Result for inf 0: and he walked immediately out o t
+[INFO] Result for inf 1: he aparctment by anoer dor
+[INFO] Final result: and he walked immediately out o the aparctment by anoer dor
+```
+
+It could take several minutes to complete each inference (average time is 5-7 minutes), and on this audio clip multiple
+inferences were required to cover the whole clip.
+
+The profiling section of the log shows that for the last inference:
+
+- Ethos-U55's PMU report:
+
+  - 28,924,298 active cycles: number of NPU cycles that were used for computation
+
+  - 868 idle cycles: number of cycles for which the NPU was idle
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+    the CPU model is not cycle-approximate or cycle-accurate.
+
+The application prints the decoded output from each of the inference runs as well as the final combined result.
diff --git a/docs/use_cases/img_class.md b/docs/use_cases/img_class.md
new file mode 100644
index 0000000..7a409f2
--- /dev/null
+++ b/docs/use_cases/img_class.md
@@ -0,0 +1,446 @@
+# Image Classification Code Sample
+
+- [Introduction](#introduction)
+  - [Prerequisites](#prerequisites)
+- [Building the code sample application from sources](#building-the-code-sample-application-from-sources)
+  - [Build options](#build-options)
+  - [Build process](#build-process)
+  - [Add custom input](#add-custom-input)
+  - [Add custom model](#add-custom-model)
+- [Setting-up and running Ethos-U55 code sample](#setting-up-and-running-ethos-u55-code-sample)
+  - [Setting up the Ethos-U55 Fast Model](#setting-up-the-ethos-u55-fast-model)
+  - [Starting Fast Model simulation](#starting-fast-model-simulation)
+  - [Running Image Classification](#running-image-classification)
+
+## Introduction
+
+This document describes the process of setting up and running the Arm® Ethos™-U55 Image Classification
+example.
+
+This use case solves a classical computer vision problem: image classification. The ML sample was developed using the
+MobileNet v2 model trained on the ImageNet dataset.
+
+The use case code can be found in the [source/use_case/img_class](../../source/use_case/img_class) directory.
+
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the code sample application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the Image Classification use case specifies:
+
+- `img_class_MODEL_TFLITE_PATH` - Path to the NN model file in TFLite format. Model will be processed and included into
+    the application axf file. The default value points to one of the delivered set of models. Note that the parameters
+    `img_class_LABELS_TXT_FILE`, `TARGET_PLATFORM` and `ETHOS_U55_ENABLED` should be aligned with the chosen model, i.e.:
+  - if `ETHOS_U55_ENABLED` is set to `On` or `1`, the NN model is assumed to be optimized. The model will naturally
+    fall back to the Arm® Cortex®-M CPU if an unoptimized model is supplied.
+  - if `ETHOS_U55_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized
+    model in this case will result in a runtime error.
+
+- `img_class_FILE_PATH`: Path to the directory containing images, or path to a single image file, to be used in the
+    application. The default value points to the resources/img_class/samples folder containing the delivered
+    set of images. See more in the [Add custom input data section](#add-custom-input).
+
+- `img_class_IMAGE_SIZE`: The NN model requires input images to be of a specific size. This parameter defines the
+    size of the image side in pixels. Images are considered squared. Default value is 224, which is what the supplied
+    MobilenetV2-1.0 model expects.
+
+- `img_class_LABELS_TXT_FILE`: Path to the labels' text file to be baked into the application. The file is used to
+    map classified classes index to the text label. Change this parameter to point to the custom labels file to map
+    custom NN model output correctly.\
+    The default value points to the delivered labels.txt file inside the delivery package.
+
+- `img_class_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By default, it
+    is set to 2MiB and should be enough for most models.
+
+- `USE_CASE_BUILD`: set to `img_class` to build only this example.
+
+In order to build **only** the Image Classification example application, add `-DUSE_CASE_BUILD=img_class` to the
+`cmake` command line specified in [Building](../documentation.md#Building).
+
+### Build process
+
+> **Note:** This section describes the process for configuring the build for `MPS3: SSE-300`. For a different target
+>platform, see the [Building](../documentation.md#Building) section.
+
+Create a build directory folder and navigate inside:
+
+```commandline
+mkdir build_img_class && cd build_img_class
+```
+
+On Linux, execute the following command to build **only** the Image Classification application to run on the Ethos-U55
+Fast Model when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+Toolchain option `CMAKE_TOOLCHAIN_FILE` points to the toolchain specific file to set the compiler and platform specific
+parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Modeldebugger for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+> **Note:** If building for different Ethos-U55 configurations, see
+>[Configuring build for different Arm Ethos-U55 configurations](../sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations):
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver is not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` can be used to configure the location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+Also, the `CMSIS_SRC_PATH` parameter can be used to override the CMSIS sources that TensorFlow uses by default. For
+example, to use the CMSIS sources fetched by the ethos-u helper script, we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=../ethos-u/core_software/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=../ethos-u/core_software/core_driver \
+    -DCMSIS_SRC_PATH=../ethos-u/core_software/cmsis \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run
+>the CMake command.
+
+If the CMake command succeeds, build the application as follows:
+
+```commandline
+make -j4
+```
+
+For Windows, use `mingw32-make`.
+
+Add `VERBOSE=1` to see compilation and link details.
+
+Results of the build will be placed under `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-img_class.axf
+ ├── ethos-u-img_class.htm
+ ├── ethos-u-img_class.map
+ ├── images-img_class.txt
+ └── sectors
+      └── img_class
+          ├── dram.bin
+          └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-img_class.axf`: The built application binary for the Image Classification use case.
+
+- `ethos-u-img_class.map`: Information from building the application (e.g. libraries used, what was optimized, location
+    of objects)
+
+- `ethos-u-img_class.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `images-img_class.txt`: Tells the FPGA which memory regions to use for loading the binaries in the `sectors/img_class` folder.
+
+### Add custom input
+
+The application performs inference on input data found in the folder, or an individual file, set by the CMake
+parameter `img_class_FILE_PATH`.
+
+To run the application with your own images, first create a folder to hold them and then copy the custom images into
+this folder, for example:
+
+```commandline
+mkdir /tmp/custom_images
+
+cp custom_image1.bmp /tmp/custom_images/
+```
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+Next set `img_class_FILE_PATH` to the location of this folder when building:
+
+```commandline
+cmake \
+    -Dimg_class_FILE_PATH=/tmp/custom_images/ \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+The images found in the `img_class_FILE_PATH` folder will be picked up and automatically converted to C++ files during
+the CMake configuration stage and then compiled into the application during the build phase for performing inference
+with.
+
+The log from the configuration stage should tell you what image directory path has been used:
+
+```log
+-- User option img_class_FILE_PATH is set to /tmp/custom_images
+-- User option img_class_IMAGE_SIZE is set to 224
+...
+-- Generating image files from /tmp/custom_images
+++ Converting custom_image1.bmp to custom_image1.cc
+...
+-- Defined build user options:
+...
+-- img_class_FILE_PATH=/tmp/custom_images
+-- img_class_IMAGE_SIZE=224
+```
+
+After compiling, your custom images will have now replaced the default ones in the application.
+
+> **Note:** The CMake parameter `img_class_IMAGE_SIZE` should match the model input size. When building the
+>application, if the size of any image does not match `img_class_IMAGE_SIZE`, it will be rescaled and padded so that
+>it does.
+
+### Add custom model
+
+The application performs inference using the model pointed to by the CMake parameter `img_class_MODEL_TFLITE_PATH`.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom model has been run through the Vela compiler
+>successfully before continuing. See [Optimize model with Vela compiler](../sections/building.md#Optimize-custom-model-with-Vela-compiler).
+
+To run the application with a custom model you will need to provide a `labels_<model_name>.txt` file of labels
+associated with the model. Each line of the file should correspond to one of the outputs in your model. See the provided
+`labels_mobilenet_v2_1.0_224.txt` file for an example.
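+
+For illustration, the first lines of such a labels file might look as follows (entries shown here are illustrative
+only; use labels that match your own model's outputs, one label per output, in output order):
+
+```log
+background
+tench, Tinca tinca
+goldfish, Carassius auratus
+...
+```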
+
+Then, you must set `img_class_MODEL_TFLITE_PATH` to the location of the Vela processed model file and
+`img_class_LABELS_TXT_FILE` to the location of the associated labels file.
+
+An example:
+
+```commandline
+cmake \
+    -Dimg_class_MODEL_TFLITE_PATH=<path/to/custom_model_after_vela.tflite> \
+    -Dimg_class_LABELS_TXT_FILE=<path/to/labels_custom_model.txt> \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=img_class ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The `.tflite` model file pointed to by `img_class_MODEL_TFLITE_PATH` and labels text file pointed to by
+`img_class_LABELS_TXT_FILE` will be converted to C++ files during the CMake configuration stage and then compiled into
+the application for performing inference with.
+
+The log from the configuration stage should tell you what model path and labels file have been used:
+
+```log
+-- User option img_class_MODEL_TFLITE_PATH is set to <path/to/custom_model_after_vela.tflite>
+...
+-- User option img_class_LABELS_TXT_FILE is set to <path/to/labels_custom_model.txt>
+...
+-- Using <path/to/custom_model_after_vela.tflite>
+++ Converting custom_model_after_vela.tflite to custom_model_after_vela.tflite.cc
+-- Generating labels file from <path/to/labels_custom_model.txt>
+-- writing to <path/to/build/generated/src/Labels.cc>
+...
+```
+
+After compiling, your custom model will have now replaced the default one in the application.
+
+## Setting-up and running Ethos-U55 code sample
+
+### Setting up the Ethos-U55 Fast Model
+
+The FVP is available publicly from [Arm Ecosystem FVP downloads](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For Ethos-U55 evaluation, please download the MPS3 version of the Arm® Corstone™-300 model that contains Ethos-U55 and
+Cortex-M55. The model is currently only supported on Linux based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+./FVP_Corstone_SSE-300_Ethos-U55.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+The pre-built application binary `ethos-u-img_class.axf` can be found in the `bin/mps3-sse-300` folder of the delivery
+package. Assuming the install location of the FVP was set to ~/FVP_install_location, the simulation can be started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 ./bin/mps3-sse-300/ethos-u-img_class.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error log entries containing
+information about the pre-built application version, TensorFlow Lite Micro library version used, data type as well as
+the input and output tensor sizes of the model compiled into the executable binary.
+
+After the application has started, if `img_class_FILE_PATH` points to a single file (or a folder containing a single
+image) the inference starts immediately. If there are multiple inputs, the application outputs a menu and waits for
+user input from the telnet terminal:
+
+```log
+User input required
+Enter option number from:
+
+1. Classify next image
+2. Classify image at chosen index
+3. Run classification on all images
+4. Show NN model info
+5. List images
+
+Choice:
+
+```
+
+1. “Classify next image” menu option will run a single inference on the next in line image from the collection of
+    compiled images.
+
+2. “Classify image at chosen index” menu option will run a single inference on the chosen image.
+
+    > **Note:** Please make sure to select an image index within the range of supplied images during the application
+    build. By default, the pre-built application has 4 images, with indexes from 0 to 3.
+
+3. “Run classification on all images” menu option triggers sequential inference executions on all built-in images.
+
+4. “Show NN model info” menu option prints information about model data type, input and output tensor sizes:
+
+    ```log
+    [INFO] uTFL version: 2.5.0
+    [INFO] Model info:
+    [INFO] Model INPUT tensors:
+    [INFO] 	tensor type is UINT8
+    [INFO] 	tensor occupies 150528 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1: 224
+    [INFO] 		2: 224
+    [INFO] 		3:   3
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.007812
+    [INFO] ZeroPoint[0] = 128
+    [INFO] Model OUTPUT tensors:
+    [INFO] 	tensor type is UINT8
+    [INFO] 	tensor occupies 1001 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1: 1001
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.098893
+    [INFO] ZeroPoint[0] = 58
+    [INFO] Activation buffer (a.k.a tensor arena) size used: 521760
+    [INFO] Number of operators: 1
+    [INFO] 	Operator 0: ethos-u
+    [INFO] Use of Arm uNPU is enabled
+    ```
+
+5. “List images” menu option prints a list of image indexes paired with the original filenames embedded in the application:
+
+    ```log
+    [INFO] List of Files:
+    [INFO] 0 => cat.bmp
+    [INFO] 1 => dog.bmp
+    [INFO] 2 => kimono.bmp
+    [INFO] 3 => tiger.bmp
+    ```
+
+### Running Image Classification
+
+Please select the first menu option to execute Image Classification.
+
+The following example illustrates application output for classification:
+
+```log
+[INFO] Running inference on image 0 => cat.bmp
+[INFO] Profile for Inference:
+	Active NPU cycles: 7622641
+	Idle NPU cycles:   525
+
+[INFO] 0) 282 (14.636096) -> tabby, tabby cat
+[INFO] 1) 286 (14.537203) -> Egyptian cat
+[INFO] 2) 283 (12.757138) -> tiger cat
+[INFO] 3) 458 (7.021370) -> bow tie, bow-tie, bowtie
+[INFO] 4) 288 (7.021370) -> lynx, catamount
+```
+
+It could take several minutes to complete one inference run (average time is 2-3 minutes).
+
+The log shows the inference results for “image 0” (index 0), which corresponds to “cat.bmp” in the sample image
+resource folder.
+
+The profiling section of the log shows that for this inference:
+
+- Ethos-U55's PMU report:
+
+  - 7,622,641 active cycles: number of NPU cycles that were used for computation
+
+  - 525 idle cycles: number of cycles for which the NPU was idle
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+    the CPU model is not cycle-approximate or cycle-accurate.
+
+The application prints the top 5 classes with their indexes, confidence scores, and labels from the associated
+`labels_mobilenet_v2_1.0_224.txt` file. The FVP window also shows the output on its LCD section.
diff --git a/docs/use_cases/inference_runner.md b/docs/use_cases/inference_runner.md
new file mode 100644
index 0000000..ffb205e
--- /dev/null
+++ b/docs/use_cases/inference_runner.md
@@ -0,0 +1,296 @@
+# Inference Runner Code Sample
+
+- [Introduction](#introduction)
+  - [Prerequisites](#prerequisites)
+- [Building the Code Samples application from sources](#building-the-code-samples-application-from-sources)
+  - [Build options](#build-options)
+  - [Build process](#build-process)
+  - [Add custom model](#add-custom-model)
+- [Setting-up and running Ethos-U55 code sample](#setting-up-and-running-ethos-u55-code-sample)
+  - [Setting up the Ethos-U55 Fast Model](#setting-up-the-ethos-u55-fast-model)
+  - [Starting Fast Model simulation](#starting-fast-model-simulation)
+  - [Running Inference Runner](#running-inference-runner)
+- [Inference Runner processing information](#inference-runner-processing-information)
+
+## Introduction
+
+This document describes the process of setting up and running the Arm® Ethos™-U55 NPU Inference Runner.
+The inference runner is intended for quickly checking profiling results for any desired network, provided it has been
+processed by the Vela compiler.
+
+A simple model is provided with the Inference Runner as an example, but it is expected that the user will replace this
+model with one they wish to profile. See [Add custom model](./inference_runner.md#Add-custom-model) for more details.
+
+The inference runner will populate all input tensors for the provided model with randomly generated data, and an
+inference is then performed. Profiling results are then displayed in the console.
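+
+Conceptually, the flow is similar to the following sketch, shown here with the desktop TensorFlow Lite Python API
+purely for illustration (the embedded application uses TensorFlow Lite Micro in C++; the model path is hypothetical):
+
+```python
+import numpy as np
+import tensorflow as tf
+
+interpreter = tf.lite.Interpreter(model_path="network_after_vela.tflite")
+interpreter.allocate_tensors()
+
+rng = np.random.default_rng()
+for detail in interpreter.get_input_details():
+    shape, dtype = detail["shape"], detail["dtype"]
+    if np.issubdtype(dtype, np.integer):
+        info = np.iinfo(dtype)
+        data = rng.integers(info.min, info.max + 1, size=shape, dtype=dtype)
+    else:
+        data = rng.random(size=shape).astype(dtype)
+    interpreter.set_tensor(detail["index"], data)  # fill input with random data
+
+interpreter.invoke()  # run a single inference
+```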
+
+The use case code can be found in the [source/use_case/inference_runner](../../source/use_case/inference_runner) directory.
+
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the Code Samples application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the Inference Runner use case adds:
+
+- `inference_runner_MODEL_TFLITE_PATH` - Path to the NN model file in TFLite format. Model will be processed and
+  included into the application axf file. The default value points to one of the delivered set of models.
+  Note that the parameters `TARGET_PLATFORM` and `ETHOS_U55_ENABLED` should be aligned with the chosen model, i.e.:
+  - if `ETHOS_U55_ENABLED` is set to `On` or `1`, the NN model is assumed to be optimized. The model will naturally
+    fall back to the Arm® Cortex®-M CPU if an unoptimized model is supplied.
+  - if `ETHOS_U55_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized model
+    in this case will result in a runtime error.
+
+- `inference_runner_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By
+    default, it is set to 2MiB and should be enough for most models.
+
+In order to build **only** the Inference Runner example application, add `-DUSE_CASE_BUILD=inference_runner` to the
+`cmake` command line specified in [Building](../documentation.md#Building).
+
+### Build process
+
+> **Note:** This section describes the process for configuring the build for `MPS3: SSE-300`. For a different target
+>platform, see the [Building](../documentation.md#Building) section.
+
+Create a build directory and navigate inside:
+
+```commandline
+mkdir build_inference_runner && cd build_inference_runner
+```
+
+On Linux, execute the following command to build **only** the Inference Runner application to run on the Ethos-U55
+Fast Model when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+
+Toolchain option `CMAKE_TOOLCHAIN_FILE` points to the toolchain specific file to set the compiler and platform specific
+parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Modeldebugger for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+
+> **Note:** If building for different Ethos-U55 configurations, see
+>[Configuring build for different Arm Ethos-U55 configurations](../sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations):
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver is not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` can be used to configure the location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+
+Also, the `CMSIS_SRC_PATH` parameter can be used to override the CMSIS sources that TensorFlow uses by default. For
+example, to use the CMSIS sources fetched by the ethos-u helper script, we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=../ethos-u/core_software/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=../ethos-u/core_software/core_driver \
+    -DCMSIS_SRC_PATH=../ethos-u/core_software/cmsis \
+    -DUSE_CASE_BUILD=inference_runner ..
+```
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run
+>the CMake command.
+
+If the CMake command succeeded, build the application as follows:
+
+```commandline
+make -j4
+```
+
+For Windows, use `mingw32-make`.
+
+Add `VERBOSE=1` to see compilation and link details.
+
+Results of the build will be placed under `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-inference_runner.axf
+ ├── ethos-u-inference_runner.htm
+ ├── ethos-u-inference_runner.map
+ ├── images-inference_runner.txt
+ └── sectors
+      └── inference_runner
+          ├── dram.bin
+          └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-inference_runner.axf`: The built application binary for the Inference Runner use case.
+
+- `ethos-u-inference_runner.map`: Information from building the application (e.g. libraries used, what was optimized,
+    location of objects)
+
+- `ethos-u-inference_runner.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `images-inference_runner.txt`: Tells the FPGA which memory regions to use for loading the binaries in the `sectors/`
+    folder.
+
+### Add custom model
+
+The application performs inference using the model pointed to by the CMake parameter `inference_runner_MODEL_TFLITE_PATH`.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom model has been run through the Vela compiler
+>successfully before continuing. See [Optimize model with Vela compiler](../sections/building.md#Optimize-custom-model-with-Vela-compiler).
+
+Then, you must set `inference_runner_MODEL_TFLITE_PATH` to the location of the Vela processed model file.
+
+An example:
+
+```commandline
+cmake \
+  -Dinference_runner_MODEL_TFLITE_PATH=<path/to/custom_model_after_vela.tflite> \
+  -DTARGET_PLATFORM=mps3 \
+  -DTARGET_SUBSYSTEM=sse-300 \
+  -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The `.tflite` model file pointed to by `inference_runner_MODEL_TFLITE_PATH` will be converted to C++ files during the CMake
+configuration stage and then compiled into the application for performing inference with.
+
+The log from the configuration stage should tell you what model path has been used:
+
+```stdout
+-- User option inference_runner_MODEL_TFLITE_PATH is set to <path/to/custom_model_after_vela.tflite>
+...
+-- Using <path/to/custom_model_after_vela.tflite>
+++ Converting custom_model_after_vela.tflite to\
+custom_model_after_vela.tflite.cc
+...
+```
+
+After compiling, your custom model will have replaced the default one in the application.
+
+## Setting-up and running Ethos-U55 code sample
+
+### Setting up the Ethos-U55 Fast Model
+
+The FVP is available publicly from
+[Arm Ecosystem FVP downloads](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For Ethos-U55 evaluation, please download the MPS3 version of the Arm® Corstone™-300 model that contains Ethos-U55 and
+Cortex-M55. The model is currently only supported on Linux-based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+./FVP_Corstone_SSE-300_Ethos-U55.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+Once the build step has completed, the application binary `ethos-u-inference_runner.axf` can be found in the `build/bin` folder.
+Assuming the install location of the FVP was set to `~/FVP_install_location`, the simulation can be started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 \
+    ./bin/mps3-sse-300/ethos-u-inference_runner.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error. The log entries contain
+information about the pre-built application version, the TensorFlow Lite Micro library version used, the data type, and
+the input and output tensor sizes of the model compiled into the executable binary.
+
+### Running Inference Runner
+
+After the application has started, the inference begins immediately and the results are output on the telnet terminal.
+
+The following example illustrates application output:
+
+```log
+[INFO] Profile for Inference:
+       Active NPU cycles: 26976
+       Idle NPU cycles: 196
+```
+
+After running an inference on randomly generated data, the log shows the profiling results for this
+inference:
+
+- Ethos-U55's PMU report:
+
+  - 26,976 active cycles: number of cycles that were used for computation
+
+  - 196 idle cycles: number of cycles for which the NPU was idle
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+    the CPU model is not cycle-approximate or cycle-accurate.
diff --git a/docs/use_cases/kws.md b/docs/use_cases/kws.md
new file mode 100644
index 0000000..316b501
--- /dev/null
+++ b/docs/use_cases/kws.md
@@ -0,0 +1,474 @@
+# Keyword Spotting Code Sample
+
+- [Introduction](#introduction)
+  - [Prerequisites](#prerequisites)
+- [Building the code sample application from sources](#building-the-code-sample-application-from-sources)
+  - [Build options](#build-options)
+  - [Build process](#build-process)
+  - [Add custom input](#add-custom-input)
+  - [Add custom model](#add-custom-model)
+- [Setting-up and running Ethos-U55 code sample](#setting-up-and-running-ethos-u55-code-sample)
+  - [Setting up the Ethos-U55 Fast Model](#setting-up-the-ethos-u55-fast-model)
+  - [Starting Fast Model simulation](#starting-fast-model-simulation)
+  - [Running Keyword Spotting](#running-keyword-spotting)
+- [Keyword Spotting processing information](#keyword-spotting-processing-information)
+  - [Preprocessing and feature extraction](#preprocessing-and-feature-extraction)
+  - [Postprocessing](#postprocessing)
+
+## Introduction
+
+This document describes the process of setting up and running the Arm® Ethos™-U55 Keyword Spotting
+example.
+
+The use case code can be found in the [source/use_case/kws](../../source/use_case/kws) directory.
+
+### Preprocessing and feature extraction
+
+The DS-CNN keyword spotting model that is supplied with the Code Samples expects audio data to be preprocessed in
+a specific way before performing an inference. This section aims to provide an overview of the feature extraction
+process used.
+
+First the audio data is normalized to the range (-1, 1).
+
+> **Note:** Mel-frequency cepstral coefficients (MFCCs) are a common feature extracted from audio data and can be used as
+>input for machine learning tasks like keyword spotting and speech recognition.
+>See source/application/main/include/Mfcc.hpp for implementation details.
+
+Next, a window of 640 audio samples is taken from the start of the audio clip. From these 640 samples we calculate 10
+MFCC features.
+
+The whole window is shifted to the right by 320 audio samples and 10 new MFCC features are calculated. This process of
+shifting and calculating is repeated until the end of the 16000 audio samples needed to perform an inference is reached.
+In total this will be 49 windows that each have 10 MFCC features calculated for them, giving an input tensor of shape
+49x10.
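+
+As a quick illustration, the window arithmetic above can be reproduced in a few lines of Python (a sketch for clarity
+only, not the application source; the constants are the ones stated above):
+
+```python
+# Sliding-window frame count for the KWS MFCC front end.
+samples = 16000   # audio samples needed for one inference
+window = 640      # samples per MFCC frame
+stride = 320      # samples the window shifts each step
+
+num_frames = (samples - window) // stride + 1
+print(num_frames)  # 49 -> input tensor of shape 49x10 (10 MFCCs per frame)
+```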
+
+These extracted features are quantized, and an inference is performed.
+
+![KWS preprocessing](../media/KWS_preprocessing.png)
+
+If the audio clip is longer than 16000 audio samples then the initial starting position is offset by 16000/2 = 8000
+audio samples. From this new starting point, MFCC features for the next 16000 audio samples are calculated and another
+inference is performed (i.e. do an inference for samples 8000-24000).
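+
+For longer clips, the per-inference start offsets follow the half-length stride described above; as a sketch (the
+48000-sample clip length here is purely illustrative):
+
+```python
+# Illustrative start positions for successive inferences on a long clip.
+clip_len, needed, step = 48000, 16000, 8000
+starts = list(range(0, clip_len - needed + 1, step))
+print(starts)  # [0, 8000, 16000, 24000, 32000]
+```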
+
+> **Note:** Parameters of the MFCC feature extraction such as window size, stride, number of features etc. all depend on
+>what was used during model training. These values are specific to each model and if you try a different keyword spotting
+>model that uses MFCC input then values are likely to need changing to match the new model.
+>In addition, MFCC feature extraction methods can vary slightly with different normalization methods or scaling etc. being used.
+
+### Postprocessing
+
+After an inference is complete the highest probability detected word is output to console, provided its probability is
+larger than a threshold value (default 0.9).
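+
+The selection logic is equivalent to the following Python sketch (illustrative only; the names are placeholders, not
+the application's actual symbols):
+
+```python
+import numpy as np
+
+def top_result(probs, labels, threshold=0.9):
+    """Return the highest-probability label if it clears the threshold."""
+    idx = int(np.argmax(probs))
+    if probs[idx] > threshold:
+        return labels[idx], float(probs[idx])
+    return None  # below threshold: nothing is reported
+```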
+
+If multiple inferences are performed for an audio clip, then multiple results will be output.
+
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the code sample application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the keyword spotting use case adds:
+
+- `kws_MODEL_TFLITE_PATH` - Path to the NN model file in TFLite format. Model will be processed and included into the application axf file. The default value points to one of the delivered set of models. Note that the parameters `kws_LABELS_TXT_FILE`,`TARGET_PLATFORM` and `ETHOS_U55_ENABLED` should be aligned with the chosen model, i.e.:
+  - if `ETHOS_U55_ENABLED` is set to `On` or `1`, the NN model is assumed to be optimized. The model will naturally fall back to the Arm® Cortex®-M CPU if an unoptimized model is supplied.
+  - if `ETHOS_U55_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized model in this case will result in a runtime error.
+
+- `kws_FILE_PATH`: Path to the directory containing audio files, or a path to a single WAV file, to be used in the application. The default value points
+    to the resources/kws/samples folder containing the delivered set of audio clips.
+
+- `kws_LABELS_TXT_FILE`: Path to the labels' text file. The file is used to map keyword class indexes to text
+    labels. The default value points to the delivered labels.txt file inside the delivery package.
+
+- `kws_AUDIO_RATE`: Input data sampling rate. Each audio file from kws_FILE_PATH is preprocessed during the build to
+    match NN model input requirements. Default value is 16000.
+
+- `kws_AUDIO_MONO`: If set to ON the audio data will be converted to mono. Default is ON.
+
+- `kws_AUDIO_OFFSET`: Begin loading audio data from this offset (in seconds). Default value is 0.
+
+- `kws_AUDIO_DURATION`: Length of the audio data to be used in the application in seconds. Default is 0 meaning the
+    whole audio file will be taken.
+
+- `kws_AUDIO_MIN_SAMPLES`: Minimum number of samples required by the network model. If the audio clip is shorter than
+    this number, it is padded with zeros. Default value is 16000.
+
+- `kws_MODEL_SCORE_THRESHOLD`: Threshold value [0.0, 1.0] that must be applied to the inference results for a
+    label to be deemed valid. Default is 0.9.
+
+- `kws_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By default, it is set
+    to 1MiB and should be enough for most models.
+
+In order to build **only** the keyword spotting example application, add `-DUSE_CASE_BUILD=kws` to the `cmake` command line specified in [Building](../documentation.md#Building).
+
+### Build process
+
+> **Note:** This section describes the process for configuring the build for `MPS3: SSE-300`. For a different target platform, see the [Building](../documentation.md#Building) section.
+
+In order to build **only** the keyword spotting example, create a build directory and
+navigate inside, for example:
+
+```commandline
+mkdir build_kws && cd build_kws
+```
+
+On Linux, execute the following command to build the Keyword Spotting application to run on the Ethos-U55 Fast Model when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=kws ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=kws ..
+```
+
+The `CMAKE_TOOLCHAIN_FILE` option points to the toolchain-specific file that sets the compiler and platform-specific
+parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DUSE_CASE_BUILD=kws ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Modeldebugger for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 \
+    -DUSE_CASE_BUILD=kws ..
+```
+
+> **Note:** If building for different Ethos-U55 configurations, see [Configuring build for different Arm Ethos-U55 configurations](../sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations).
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver is not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` can be used to configure the location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DUSE_CASE_BUILD=kws ..
+```
+
+Also, the `CMSIS_SRC_PATH` parameter can be used to override the CMSIS sources that TensorFlow uses by default. For example, to use the CMSIS sources fetched by the ethos-u helper script, we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=../ethos-u/core_software/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=../ethos-u/core_software/core_driver \
+    -DCMSIS_SRC_PATH=../ethos-u/core_software/cmsis \
+    -DUSE_CASE_BUILD=kws ..
+```
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run the CMake command.
+
+If the CMake command succeeded, build the application as follows:
+
+```commandline
+make -j4
+```
+
+For Windows, use `mingw32-make`.
+
+Add `VERBOSE=1` to see compilation and link details.
+
+Results of the build will be placed under the `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-kws.axf
+ ├── ethos-u-kws.htm
+ ├── ethos-u-kws.map
+ ├── images-kws.txt
+ └── sectors
+      └── kws
+           ├── dram.bin
+           └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-kws.axf`: The built application binary for the Keyword Spotting use case.
+
+- `ethos-u-kws.map`: Information from building the application (e.g. libraries used, what was optimized, location of
+    objects)
+
+- `ethos-u-kws.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `images-kws.txt`: Tells the FPGA which memory regions to use for loading the binaries in the `sectors/` folder.
+
+### Add custom input
+
+The application performs inference on audio data found in the folder, or an individual file, set by the CMake parameter `kws_FILE_PATH`.
+
+To run the application with your own audio clips first create a folder to hold them and then copy the custom audio files
+into this folder, for example:
+
+```commandline
+mkdir /tmp/custom_wavs
+
+cp my_clip.wav /tmp/custom_wavs/
+```
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+Next set `kws_FILE_PATH` to the location of this folder when building:
+
+```commandline
+cmake \
+    -Dkws_FILE_PATH=/tmp/custom_wavs/ \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DUSE_CASE_BUILD=kws \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+The audio clips found in the `kws_FILE_PATH` folder will be picked up and automatically converted to C++ files during the
+CMake configuration stage and then compiled into the application during the build phase for performing inference with.
+
+The log from the configuration stage should tell you what audio clip directory path has been used:
+
+```log
+-- User option kws_FILE_PATH is set to /tmp/custom_wavs
+-- Generating audio files from /tmp/custom_wavs
+++ Converting my_clip.wav to my_clip.cc
+++ Generating build/generated/kws/include/AudioClips.hpp
+++ Generating build/generated/kws/src/AudioClips.cc
+-- Defined build user options:
+-- kws_FILE_PATH=/tmp/custom_wavs
+```
+
+After compiling, your custom inputs will have replaced the default ones in the application.
+
+> **Note:** The CMake parameter `kws_AUDIO_MIN_SAMPLES` determines the minimum number of input samples. When building the application,
+>if an audio clip is shorter than `kws_AUDIO_MIN_SAMPLES` samples, it will be padded with zeros to reach that length.
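+
+The padding behaviour amounts to the following Python sketch (illustrative only; the names are not from the build
+scripts):
+
+```python
+import numpy as np
+
+def pad_to_min(samples, min_samples=16000):
+    """Zero-pad an audio clip up to the model's minimum sample count."""
+    if len(samples) < min_samples:
+        samples = np.pad(samples, (0, min_samples - len(samples)))
+    return samples
+```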
+
+### Add custom model
+
+The application performs inference using the model pointed to by the CMake parameter `kws_MODEL_TFLITE_PATH`.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom model has been run through the Vela compiler successfully before continuing. See [Optimize model with Vela compiler](../sections/building.md#Optimize-custom-model-with-Vela-compiler).
+
+To run the application with a custom model you will need to provide a `labels_<model_name>.txt` file of labels
+associated with the model. Each line of the file should correspond to one of the outputs in your model. See the provided
+`ds_cnn_labels.txt` file for an example.
+
+Then, you must set `kws_MODEL_TFLITE_PATH` to the location of the Vela processed model file and `kws_LABELS_TXT_FILE`
+to the location of the associated labels file.
+
+An example:
+
+```commandline
+cmake \
+    -Dkws_MODEL_TFLITE_PATH=<path/to/custom_model_after_vela.tflite> \
+    -Dkws_LABELS_TXT_FILE=<path/to/labels_custom_model.txt> \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DUSE_CASE_BUILD=kws \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The `.tflite` model file pointed to by `kws_MODEL_TFLITE_PATH` and labels text file pointed to by `kws_LABELS_TXT_FILE` will
+be converted to C++ files during the CMake configuration stage and then compiled into the application for performing
+inference with.
+
+The log from the configuration stage should tell you what model path and labels file have been used:
+
+```log
+-- User option kws_MODEL_TFLITE_PATH is set to <path/to/custom_model_after_vela.tflite>
+...
+-- User option kws_LABELS_TXT_FILE is set to <path/to/labels_custom_model.txt>
+...
+-- Using <path/to/custom_model_after_vela.tflite>
+++ Converting custom_model_after_vela.tflite to\
+custom_model_after_vela.tflite.cc
+-- Generating labels file from <path/to/labels_custom_model.txt>
+-- writing to <path/to/build/generated/src/Labels.cc>
+...
+```
+
+After compiling, your custom model will have replaced the default one in the application.
+
+## Setting-up and running Ethos-U55 code sample
+
+### Setting up the Ethos-U55 Fast Model
+
+The FVP is available publicly from [Arm Ecosystem FVP downloads](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For Ethos-U55 evaluation, please download the MPS3 version of the Arm® Corstone™-300 model that contains Ethos-U55 and
+Cortex-M55. The model is currently only supported on Linux-based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+./FVP_Corstone_SSE-300_Ethos-U55.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+Once the build step has completed, the application binary `ethos-u-kws.axf` can be found in the `build/bin` folder.
+Assuming the install location of the FVP was set to `~/FVP_install_location`, the simulation can be started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 \
+    ./bin/mps3-sse-300/ethos-u-kws.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error. The log entries contain
+information about the pre-built application version, the TensorFlow Lite Micro library version used, the data type, and
+the input and output tensor sizes of the model compiled into the executable binary.
+
+After the application has started, if `kws_FILE_PATH` points to a single file (or a folder containing a single input file)
+the inference starts immediately. If there are multiple inputs, the application outputs a menu and waits for user input from the telnet terminal:
+
+```log
+User input required
+Enter option number from:
+
+1. Classify next audio clip
+2. Classify audio clip at chosen index
+3. Run classification on all audio clips
+4. Show NN model info
+5. List audio clips
+
+Choice:
+
+```
+
+1. “Classify next audio clip” menu option will run inference on the next voice clip from the collection of
+    compiled audio clips.
+
+    > **Note:** If the clip is over a certain length, the application will invoke multiple inference runs to cover the entire file.
+
+2. “Classify audio clip at chosen index” menu option will run inference on the chosen audio clip.
+
+    > **Note:** Please make sure to select an audio clip index within the range of audio clips supplied during the application build.
+    > By default, the pre-built application has 4 files, with indexes from 0 to 3.
+
+3. “Run classification on all audio clips” menu option triggers sequential inference executions on all built-in voice
+    samples.
+
+4. “Show NN model info” menu option prints information about model data type, input and output tensor sizes:
+
+    ```log
+    [INFO] uTFL version: 2.5.0
+    [INFO] Model info:
+    [INFO] Model INPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 490 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:   1
+    [INFO] 		2:  49
+    [INFO] 		3:  10
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 1.107164
+    [INFO] ZeroPoint[0] = 95
+    [INFO] Model OUTPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 12 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:  12
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.003906
+    [INFO] ZeroPoint[0] = -128
+    [INFO] Activation buffer (a.k.a tensor arena) size used: 72848
+    [INFO] Number of operators: 1
+    [INFO] 	Operator 0: ethos-u
+    [INFO] Use of Arm uNPU is enabled
+    ```
+
+5. “List audio clips” menu option prints a list of audio clip indexes and the original filenames embedded in the
+    application:
+
+    ```log
+    [INFO] List of Files:
+    [INFO] 0 => down.wav
+    [INFO] 1 => rightleftup.wav
+    [INFO] 2 => yes.wav
+    [INFO] 3 => yesnogostop.wav
+    ```
+
+### Running Keyword Spotting
+
+Selecting the first option will run inference on the first file.
+
+The following example illustrates application output for classification:
+
+```log
+[INFO] Running inference on audio clip 0 => down.wav
+[INFO] Inference 1/1
+[INFO] Profile for Inference:
+	Active NPU cycles: 680400
+	Idle NPU cycles:   766
+
+[INFO] For timestamp: 0.000000 (inference #: 0); threshold: 0.900000
+[INFO] 		label @ 0: down, score: 0.996094
+```
+
+Each inference should take less than 30 seconds on most systems running Fast Model.
+The profiling section of the log shows that for this inference:
+
+- Ethos-U55's PMU report:
+
+  - 680,400 active cycles: number of cycles that were used for computation
+
+  - 766 idle cycles: number of cycles for which the NPU was idle
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+    the CPU model is not cycle-approximate or cycle-accurate.
+
+The application prints the highest confidence score and the associated label from the `ds_cnn_labels.txt` file.
\ No newline at end of file
diff --git a/docs/use_cases/kws_asr.md b/docs/use_cases/kws_asr.md
new file mode 100644
index 0000000..e79b887
--- /dev/null
+++ b/docs/use_cases/kws_asr.md
@@ -0,0 +1,589 @@
+# Keyword Spotting and Automatic Speech Recognition Code Sample
+
+- [Introduction](#introduction)
+  - [Prerequisites](#prerequisites)
+- [Building the code sample application from sources](#building-the-code-sample-application-from-sources)
+  - [Build options](#build-options)
+  - [Build process](#build-process)
+  - [Add custom input](#add-custom-input)
+  - [Add custom model](#add-custom-model)
+- [Setting-up and running Ethos-U55 Code Samples](#setting-up-and-running-ethos-u55-code-samples)
+  - [Setting up the Ethos-U55 Fast Model](#setting-up-the-ethos-u55-fast-model)
+  - [Starting Fast Model simulation](#starting-fast-model-simulation)
+  - [Running Keyword Spotting and Automatic Speech Recognition](#running-keyword-spotting-and-automatic-speech-recognition)
+- [Keyword Spotting and Automatic Speech Recognition processing information](#keyword-spotting-and-automatic-speech-recognition-processing-information)
+  - [Preprocessing and feature extraction](#preprocessing-and-feature-extraction)
+    - [Keyword Spotting Preprocessing](#keyword-spotting-preprocessing)
+    - [Automatic Speech Recognition Preprocessing](#automatic-speech-recognition-preprocessing)
+  - [Postprocessing](#postprocessing)
+
+## Introduction
+
+This document describes the process of setting up and running an example of sequential execution of the Keyword Spotting
+and Automatic Speech Recognition models on Cortex-M CPU and Ethos-U NPU.
+
+The Keyword Spotting and Automatic Speech Recognition example demonstrates how to run multiple models sequentially. A
+Keyword Spotting model is first run on the CPU and if a set keyword is detected then an Automatic Speech Recognition
+model is run on Ethos-U55 on the remaining audio.
+The tensor arena memory region is reused between models to optimize the application memory footprint.
+
+"Yes" key word is used to trigger full command recognition following the key word.
+Use case code could be found in [source/use_case/kws_asr](../../source/use_case/kws_asr]) directory.
+
+### Preprocessing and feature extraction
+
+In this use case two different models are used, each with its own preprocessing requirements, so each
+preprocessing stage is detailed below. Note that Automatic Speech Recognition only occurs if a keyword is detected in
+the audio clip.
+
+By default the KWS model is run purely on CPU and not on the Ethos-U55.
+
+#### Keyword Spotting Preprocessing
+
+The DS-CNN keyword spotting model that is used with the Code Samples expects audio data to be preprocessed in
+a specific way before performing an inference. This section aims to provide an overview of the feature extraction
+process used.
+
+First the audio data is normalized to the range (-1, 1).
+
+> **Note:** Mel-frequency cepstral coefficients (MFCCs) are a common feature extracted from audio data and can be used as input for machine learning tasks like keyword spotting and speech recognition. See source/application/main/include/Mfcc.hpp for implementation details.
+
+Next, a window of 640 audio samples is taken from the start of the audio clip. From these 640 samples we calculate 10
+MFCC features.
+
+The whole window is shifted to the right by 320 audio samples and 10 new MFCC features are calculated. This process of
+shifting and calculating is repeated until the end of the 16000 audio samples needed to perform an inference is reached.
+In total this will be 49 windows that each have 10 MFCC features calculated for them, giving an input tensor of shape
+49x10.
+
+These extracted features are quantized, and an inference is performed.
+
+If the audio clip is longer than 16000 audio samples then the initial starting position is offset by 16000/2 = 8000
+audio samples. From this new starting point, MFCC features for the next 16000 audio samples are calculated and another
+inference is performed (i.e. do an inference for samples 8000-24000).
+
+> **Note:** Parameters of the MFCC feature extraction such as window size, stride, number of features etc. all depend on what was used during model training. These values are specific to each model and if you try a different keyword spotting model that uses MFCC input then values are likely to need changing to match the new model.
+
+In addition, MFCC feature extraction methods can vary slightly with different normalization methods or scaling etc. being used.
+
+#### Automatic Speech Recognition Preprocessing
+
+The wav2letter automatic speech recognition model that is used with the Code Samples expects audio data to be
+preprocessed in a specific way before performing an inference. This section aims to provide an overview of the feature
+extraction process used.
+
+First the audio data is normalized to the range (-1, 1).
+
+> **Note:** Mel-frequency cepstral coefficients (MFCCs) are a common feature extracted from audio data and can be used as input for machine learning tasks like keyword spotting and speech recognition. See source/application/main/include/Mfcc.hpp for implementation details.
+
+Next, a window of 512 audio samples is taken from the start of the audio clip. From these 512 samples we calculate 13
+MFCC features.
+
+The whole window is shifted to the right by 160 audio samples and 13 new MFCC features are calculated. This process of
+shifting and calculating is repeated until enough audio samples to perform an inference have been processed. In total
+this will be 296 windows that each have 13 MFCC features calculated for them.
+
+After extracting MFCC features the first and second order derivatives of these features with respect to time are
+calculated. These derivative features are then standardized and concatenated with the MFCC features (which also get
+standardized). At this point the input tensor will have a shape of 296x39.
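+
+The resulting tensor shape can be sanity-checked with a short Python sketch (illustrative constants taken from the
+description above):
+
+```python
+# wav2letter front end: 13 MFCCs plus first and second derivatives per frame.
+window, stride = 512, 160
+num_frames = 296                         # frames processed per inference
+features_per_frame = 13 * 3              # MFCCs + deltas + delta-deltas = 39
+samples_needed = (num_frames - 1) * stride + window
+print(samples_needed)                    # 47712 audio samples per inference
+print((num_frames, features_per_frame))  # (296, 39) input tensor shape
+```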
+
+These extracted features are quantized, and an inference is performed.
+
+For longer audio clips where multiple inferences need to be performed, then the initial starting position is offset by
+(100\*160) = 16000 audio samples. From this new starting point, MFCC and derivative features are calculated as before
+until there is enough to perform another inference. Padding can be used if there are not enough audio samples for at
+least 1 inference. This step is repeated until the whole audio clip has been processed. If there are not enough audio
+samples for a final complete inference the MFCC features will be padded by repeating the last calculated feature until
+an inference can be performed.
+
+> **Note:** Parameters of the MFCC feature extraction such as window size, stride, number of features etc. all depend on what was used during model training. These values are specific to each model. If you switch to a different ASR model than the one supplied, then the feature extraction process could be completely different to the one currently implemented.
+
+The number of audio samples we offset by for long audio clips is specific to the included wav2letter model.
+
+### Postprocessing
+
+If a keyword is detected then the ASR process is run and the raw output of that inference needs to be postprocessed to
+get a usable result.
+
+The raw output from the model is a tensor of shape 148x29 where each row is a probability distribution over the 29
+possible characters that can appear at each of the 148 time steps.
+
+This wav2letter model is trained using context windows; this means that only certain parts of the output are usable,
+depending on the part of the audio clip that is currently being processed.
+
+If this is the first inference and multiple inferences are required, then ignore the final 49 rows of the output.
+Similarly, if this is the final inference from multiple inferences then ignore the first 49 rows of the output. Finally,
+if this inference is not the last or first inference then ignore the first and last 49 rows of the model output.
+
+> **Note:** If the audio clip is small enough then the whole of the model output is usable and there is no need to throw away any of the output before continuing.
+
+Once any rows have been removed the final processing can be done. To process the output, first the letter with the
+highest probability at each time step is found. Next, any letters that are repeated multiple times in a row are removed
+(e.g. [t, t, t, o, p, p] becomes [t, o, p]). Finally, the 29^th^ blank token letter is removed from the output.
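+
+A minimal Python sketch of this trimming and decoding logic follows (for illustration only; the context size and
+zero-based blank index reflect the description above, and the function names are not the application's):
+
+```python
+import numpy as np
+
+def trim_context(output, is_first, is_last, context=49):
+    """Drop the unusable context rows from one inference's output."""
+    start = 0 if is_first else context
+    end = len(output) if is_last else len(output) - context
+    return output[start:end]
+
+def decode(output, blank_idx=28):
+    """Greedy decode: argmax per step, collapse repeats, drop blanks."""
+    best = np.argmax(output, axis=1)
+    collapsed = [c for i, c in enumerate(best) if i == 0 or c != best[i - 1]]
+    return [c for c in collapsed if c != blank_idx]
+```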
+
+For the final output, the results from all inferences are combined before decoding. What you are left with is then
+displayed to the console.
+
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the code sample application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the Keyword Spotting and Automatic Speech
+Recognition use case adds:
+
+- `kws_asr_MODEL_TFLITE_PATH_ASR` and `kws_asr_MODEL_TFLITE_PATH_KWS`: Path to the NN model files in TFLite format.
+    Models will be processed and included into the application axf file. The default value points to one of the delivered set of models.
+    Note that the parameters `kws_asr_LABELS_TXT_FILE_KWS`, `kws_asr_LABELS_TXT_FILE_ASR`,`TARGET_PLATFORM` and `ETHOS_U55_ENABLED`
+    should be aligned with the chosen model, i.e.:
+  - if `ETHOS_U55_ENABLED` is set to `On` or `1`, the NN model is assumed to be optimized. The model will naturally fall back to the Arm® Cortex®-M CPU if an unoptimized model is supplied.
+  - if `ETHOS_U55_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized model in this case will result in a runtime error.
+
+- `kws_asr_FILE_PATH`: Path to the directory containing audio files, or a path to a single WAV file, to be used in the application. The default value
+    points to the resources/kws_asr/samples folder containing the delivered set of audio clips.
+
+- `kws_asr_LABELS_TXT_FILE_KWS` and `kws_asr_LABELS_TXT_FILE_ASR`: Paths to the keyword spotting labels' and the
+    automatic speech recognition labels' text files, respectively. The files are used to map class indexes to text
+    labels. The default values point to the delivered labels files inside the delivery
+    package.
+
+- `kws_asr_AUDIO_RATE`: Input data sampling rate. Each audio file from kws_asr_FILE_PATH is preprocessed during the
+    build to match NN model input requirements. Default value is 16000.
+
+- `kws_asr_AUDIO_MONO`: If set to ON the audio data will be converted to mono. Default is ON.
+
+- `kws_asr_AUDIO_OFFSET`: Start loading audio data starting from this offset (in seconds). Default value is 0.
+
+- `kws_asr_AUDIO_DURATION`: Length of the audio data to be used in the application in seconds. Default is 0 meaning
+    the whole audio file will be taken.
+
+- `kws_asr_AUDIO_MIN_SAMPLES`: Minimum number of samples required by the network model. If the audio clip is shorter
+    than this number, it is padded with zeros. Default value is 16000.
+
+- `kws_asr_MODEL_SCORE_THRESHOLD_KWS`: Threshold value that must be applied to the keyword spotting inference
+    results for a label to be deemed valid. Default is 0.9.
+
+- `kws_asr_MODEL_SCORE_THRESHOLD_ASR`: Threshold value that must be applied to the automatic speech recognition
+    inference results for a label to be deemed valid. Default is 0.5.
+
+- `kws_asr_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By default, it is
+    set to 2MiB and should be enough for most models.
+
+In order to build **only** the Keyword Spotting and Automatic Speech
+Recognition example application, add `-DUSE_CASE_BUILD=kws_asr` to the `cmake` command line specified in [Building](../documentation.md#Building).
+
+### Build process
+
+> **Note:** This section describes the process for configuring the build for `MPS3: SSE-300`. For a different target platform, see [Building](../documentation.md#Building).
+
+Create a build directory and navigate inside:
+
+```commandline
+mkdir build_kws_asr && cd build_kws_asr
+```
+
+On Linux, execute the following command to build the application to run on the Ethos-U55 Fast Model when providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+For Windows, add `-G "MinGW Makefiles"`:
+
+```commandline
+cmake \
+    -G "MinGW Makefiles" \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=./scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+The `CMAKE_TOOLCHAIN_FILE` option points to the toolchain-specific file that sets the compiler and platform-specific
+parameters.
+
+To configure a build that can be debugged using Arm-DS, we can just specify
+the build type as `Debug`:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+To configure a build that can be debugged using a tool that only supports
+DWARF format 3 (Modeldebugger for example), we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DCMAKE_BUILD_TYPE=Debug \
+    -DARMCLANG_DEBUG_DWARF_LEVEL=3 \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+> **Note:** If building for different Ethos-U55 configurations, see [Configuring build for different Arm Ethos-U55 configurations](../sections/building.md#Configuring-build-for-different-Arm-Ethos-U55-configurations).
+
+If the TensorFlow source tree is not in its default expected location,
+set the path using `TENSORFLOW_SRC_PATH`.
+Similarly, if the Ethos-U55 driver is not in the default location,
+`ETHOS_U55_DRIVER_SRC_PATH` can be used to configure the location. For example:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=/my/custom/location/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=/my/custom/location/core_driver \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+Also, the `CMSIS_SRC_PATH` parameter can be used to override the CMSIS sources that TensorFlow uses by default. For example, to use the CMSIS sources fetched by the ethos-u helper script, we can use:
+
+```commandline
+cmake \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DTENSORFLOW_SRC_PATH=../ethos-u/core_software/tensorflow \
+    -DETHOS_U55_DRIVER_SRC_PATH=../ethos-u/core_software/core_driver \
+    -DCMSIS_SRC_PATH=../ethos-u/core_software/cmsis \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run the CMake command.
+
+If the CMake command succeeded, build the application as follows:
+
+```commandline
+make -j4
+```
+
+For Windows, use `mingw32-make`.
+
+Add `VERBOSE=1` to see compilation and link details.
+
+Results of the build will be placed under the `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-kws_asr.axf
+ ├── ethos-u-kws_asr.htm
+ ├── ethos-u-kws_asr.map
+ ├── images-kws_asr.txt
+ └── sectors
+      └── kws_asr
+           ├── dram.bin
+           └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-kws_asr.axf`: The built application binary for the Keyword Spotting and Automatic Speech Recognition use
+    case.
+
+- `ethos-u-kws_asr.map`: Information from building the application (e.g. libraries used, what was optimized, location
+    of objects)
+
+- `ethos-u-kws_asr.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `images-kws_asr.txt`: Tells the FPGA which memory regions to use for loading the binaries in the `sectors/` folder.
+
+### Add custom input
+
+The application performs inference on data found in the folder set by the CMake parameter `kws_asr_FILE_PATH`.
+
+To run the application with your own audio clips first create a folder to hold them and then copy the custom files into
+this folder:
+
+```commandline
+mkdir /tmp/custom_files
+
+cp custom_audio1.wav /tmp/custom_files/
+```
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+Next set `kws_asr_FILE_PATH` to the location of this folder when building:
+
+```commandline
+cmake \
+    -Dkws_asr_FILE_PATH=/tmp/custom_files/ \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+The files found in the `kws_asr_FILE_PATH` folder will be picked up and automatically converted to C++ files during the
+CMake configuration stage and then compiled into the application during the build phase for performing inference with.
+
+The log from the configuration stage should tell you what directory path has been used:
+
+```log
+-- User option kws_asr_FILE_PATH is set to /tmp/custom_files
+```
+
+After compiling, your custom inputs will have replaced the default ones in the application.
+
+### Add custom model
+
+The application performs KWS inference using the model pointed to by the CMake parameter `kws_asr_MODEL_TFLITE_PATH_KWS` and
+ASR inference using the model pointed to by the CMake parameter `kws_asr_MODEL_TFLITE_PATH_ASR`.
+
+This section assumes you wish to change the existing ASR model to a custom one. If instead you wish to change the KWS
+model then the instructions will be the same except ASR will change to KWS.
+
+> **Note:** If you want to run the model using Ethos-U55, ensure your custom model has been run through the Vela compiler successfully before continuing. See [Optimize model with Vela compiler](../sections/building.md#Optimize-custom-model-with-Vela-compiler).
+
+To run the application with a custom model you will need to provide a `labels_<model_name>.txt` file of labels
+associated with the model. Each line of the file should correspond to one of the outputs in your model. See the provided
+`labels_wav2letter.txt` file for an example.
+
+Then, you must set `kws_asr_MODEL_TFLITE_PATH_ASR` to the location of the Vela processed model file and
+`kws_asr_LABELS_TXT_FILE_ASR` to the location of the associated labels file.
+
+An example:
+
+```commandline
+cmake \
+    -Dkws_asr_MODEL_TFLITE_PATH_ASR=<path/to/custom_asr_model_after_vela.tflite> \
+    -Dkws_asr_LABELS_TXT_FILE_ASR=<path/to/labels_custom_model.txt> \
+    -DTARGET_PLATFORM=mps3 \
+    -DTARGET_SUBSYSTEM=sse-300 \
+    -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/bare-metal-toolchain.cmake \
+    -DUSE_CASE_BUILD=kws_asr ..
+```
+
+For Windows, add `-G "MinGW Makefiles"` to the CMake command.
+
+> **Note:** Clean the build directory before re-running the CMake command.
+
+The `.tflite` model files pointed to by `kws_asr_MODEL_TFLITE_PATH_KWS` and `kws_asr_MODEL_TFLITE_PATH_ASR`, and the labels text files pointed to by `kws_asr_LABELS_TXT_FILE_KWS` and `kws_asr_LABELS_TXT_FILE_ASR`,
+will be converted to C++ files during the CMake configuration stage and then compiled into the application for
+performing inference with.
+
+The log from the configuration stage should tell you what model path and labels file have been used:
+
+```log
+-- User option TARGET_PLATFORM is set to mps3
+-- User option kws_asr_MODEL_TFLITE_PATH_ASR is set to <path/to/custom_asr_model_after_vela.tflite>
+...
+-- User option kws_asr_LABELS_TXT_FILE_ASR is set to <path/to/labels_custom_model.txt>
+...
+-- Using <path/to/custom_asr_model_after_vela.tflite>
+++ Converting custom_asr_model_after_vela.tflite to\
+custom_asr_model_after_vela.tflite.cc
+-- Generating labels file from <path/to/labels_custom_model.txt>
+-- writing to Labels_wav2letter
+...
+```
+
+After compiling, your custom model will have replaced the default one in the application.
+
+## Setting-up and running Ethos-U55 Code Samples
+
+### Setting up the Ethos-U55 Fast Model
+
+The FVP is available publicly from [Arm Ecosystem FVP downloads](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For Ethos-U55 evaluation, please download the MPS3 version of the Arm® Corstone™-300 model that contains Ethos-U55 and
+Cortex-M55. The model is currently only supported on Linux-based machines. To install the FVP:
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+./FVP_Corstone_SSE-300_Ethos-U55.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+Once the build step has completed, the application binary `ethos-u-kws_asr.axf` can be found in the `build/bin` folder.
+Assuming the install location of the FVP was set to `~/FVP_install_location`, the simulation can be started by:
+
+```commandline
+~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55 \
+    ./bin/mps3-sse-300/ethos-u-kws_asr.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error. The log entries contain
+information about the pre-built application version, the TensorFlow Lite Micro library version used, the data type, and
+the input and output tensor sizes of the model compiled into the executable binary.
+
+After the application has started, if `kws_asr_FILE_PATH` points to a single file (or a folder containing a single input file)
+the inference starts immediately. If there are multiple inputs, the application outputs a menu and waits for user input from the telnet terminal:
+
+```log
+User input required
+Enter option number from:
+
+1. Classify next audio clip
+2. Classify audio clip at chosen index
+3. Run classification on all audio clips
+4. Show NN model info
+5. List audio clips
+
+Choice:
+
+```
+
+1. “Classify next audio clip” menu option will run a single inference on the next included file.
+
+2. “Classify audio clip at chosen index” menu option will run inference on the chosen audio clip.
+
+    > **Note:** Please make sure to select an audio clip index within the range of audio clips supplied during the application build.
+
+3. “Run classification on all audio clips” menu option triggers sequential inference executions on all built-in files.
+
+4. “Show NN model info” menu option prints information about model data type, input and output tensor sizes:
+
+    ```log
+    [INFO] uTFL version: 2.5.0
+    [INFO] Model INPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 490 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:   1
+    [INFO] 		2:  49
+    [INFO] 		3:  10
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 1.107164
+    [INFO] ZeroPoint[0] = 95
+    [INFO] Model OUTPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 12 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:  12
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.003906
+    [INFO] ZeroPoint[0] = -128
+    [INFO] Activation buffer (a.k.a tensor arena) size used: 123616
+    [INFO] Number of operators: 16
+    [INFO] 	Operator 0: RESHAPE
+    [INFO] 	Operator 1: CONV_2D
+    [INFO] 	Operator 2: DEPTHWISE_CONV_2D
+    [INFO] 	Operator 3: CONV_2D
+    [INFO] 	Operator 4: DEPTHWISE_CONV_2D
+    [INFO] 	Operator 5: CONV_2D
+    [INFO] 	Operator 6: DEPTHWISE_CONV_2D
+    [INFO] 	Operator 7: CONV_2D
+    [INFO] 	Operator 8: DEPTHWISE_CONV_2D
+    [INFO] 	Operator 9: CONV_2D
+    [INFO] 	Operator 10: DEPTHWISE_CONV_2D
+    [INFO] 	Operator 11: CONV_2D
+    [INFO] 	Operator 12: AVERAGE_POOL_2D
+    [INFO] 	Operator 13: RESHAPE
+    [INFO] 	Operator 14: FULLY_CONNECTED
+    [INFO] 	Operator 15: SOFTMAX
+    [INFO] Model INPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 11544 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1: 296
+    [INFO] 		2:  39
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.110316
+    [INFO] ZeroPoint[0] = -11
+    [INFO] Model OUTPUT tensors:
+    [INFO] 	tensor type is INT8
+    [INFO] 	tensor occupies 4292 bytes with dimensions
+    [INFO] 		0:   1
+    [INFO] 		1:   1
+    [INFO] 		2: 148
+    [INFO] 		3:  29
+    [INFO] Quant dimension: 0
+    [INFO] Scale[0] = 0.003906
+    [INFO] ZeroPoint[0] = -128
+    [INFO] Activation buffer (a.k.a tensor arena) size used: 809808
+    [INFO] Number of operators: 1
+    [INFO] 	Operator 0: ethos-u
+    ```
+
+5. “List audio clips” menu option prints a list of audio clip indexes and the original filenames embedded in the application:
+
+    ```log
+    [INFO] List of Files:
+    [INFO] 0 => yesnogostop.wav
+    ```
+
+### Running Keyword Spotting and Automatic Speech Recognition
+
+Please select the first menu option to execute Keyword Spotting and Automatic Speech Recognition.
+
+The following example illustrates application output:
+
+```log
+[INFO] KWS audio data window size 16000
+[INFO] Running KWS inference on audio clip 0 => yesnogostop.wav
+[INFO] Inference 1/7
+[INFO] Profile for Inference:
+	Active NPU cycles: 0
+	Idle NPU cycles:   6
+
+[INFO] For timestamp: 0.000000 (inference #: 0); threshold: 0.900000
+[INFO] 		label @ 0: yes, score: 0.996094
+[INFO] Keyword spotted
+[INFO] Inference 1/2
+[INFO] Profile for Inference:
+	Active NPU cycles: 28924742
+	Idle NPU cycles:   424
+
+[INFO] Inference 2/2
+[INFO] Profile for Inference:
+	Active NPU cycles: 28924740
+	Idle NPU cycles:   426
+
+[INFO] Result for inf 0: no gow
+[INFO] Result for inf 1:  stoppe
+[INFO] Final result: no gow stoppe
+```
+
+It could take several minutes to complete one inference run (average time is 2-3 minutes).
+
+Using the input “yesnogostop.wav”, the log shows inference results for the KWS operation first, detecting the
+trigger word “yes” with the stated probability score (in this case 0.99). After this, the ASR inference is run,
+printing the words recognized from the input sample.
+
+The profiling section of the log shows that for the ASR inference:
+
+- Ethos-U55's PMU report:
+
+  - 28,924,740 active cycles: number of cycles that were used for computation
+
+  - 426 idle cycles: number of cycles for which the NPU was idle
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+the CPU model is not cycle-approximate or cycle-accurate.
+
+    Note that in this example the KWS inference does not use the Ethos-U55 and is run purely on CPU, therefore 0 Active
+    NPU cycles is shown.
diff --git a/model_conditioning_examples/Readme.md b/model_conditioning_examples/Readme.md
new file mode 100644
index 0000000..ede2c24
--- /dev/null
+++ b/model_conditioning_examples/Readme.md
@@ -0,0 +1,173 @@
+# Model conditioning examples
+
+- [Introduction](#introduction)
+  - [How to run](#how-to-run)
+- [Quantization](#quantization)
+  - [Post-training quantization](#post-training-quantization)
+  - [Quantization aware training](#quantization-aware-training)
+- [Weight pruning](#weight-pruning)
+- [Weight clustering](#weight-clustering)
+- [References](#references)
+
+## Introduction
+
+This folder contains short example scripts that demonstrate some methods available in TensorFlow to condition your model
+in preparation for deployment on Arm Ethos NPU.
+
+These scripts will cover three main topics:
+
+- Quantization
+- Weight clustering
+- Weight pruning
+
+The objective of these scripts is not to be a single source of knowledge on everything related to model conditioning.
+Instead, the aim is to provide the reader with a quick starting point that demonstrates some commonly used tools that
+will enable models to run on Arm Ethos NPU and also optimize them for maximum performance on the Arm Ethos NPU.
+
+Links to more in-depth guides available on the TensorFlow website are provided in the [references](#references) section
+in this Readme.
+
+### How to run
+
+From the `model_conditioning_examples` folder run the following command:
+
+```commandline
+./setup.sh
+```
+
+This will create a Python virtual environment and install the required versions of TensorFlow and the TensorFlow Model
+Optimization Toolkit needed to run the example scripts.
+
+If the virtual environment has not been activated you can do so by running:
+
+```commandline
+source ./env/bin/activate
+```
+
+You can then run the examples from the command line. For example, to run the post-training quantization example:
+
+```commandline
+python ./post_training_quantization.py
+```
+
+The produced TensorFlow Lite model files will be saved in a `conditioned_models` sub-folder.
+
+## Quantization
+
+Most machine learning models are trained using 32-bit floating point precision. However, Arm Ethos NPU performs
+calculations in 8-bit integer precision. As a result, any model you wish to deploy on Arm Ethos NPU must
+first be fully quantized to 8 bits.
+
+TensorFlow provides two methods of quantization and the scripts in this folder will demonstrate these:
+
+- [Post-training quantization](./post_training_quantization.py)
+- [Quantization aware training](./quantization_aware_training.py)
+
+Both of these techniques will not only quantize the weights of the model but also the variable tensors, such as the model
+input and output, and the activations of each intermediate layer.
+
+For details on the quantization specification used by TensorFlow please see
+[here](https://www.tensorflow.org/lite/performance/quantization_spec).
+
+In both methods scale and zero point values are chosen to allow the floating point weights to be maximally
+represented in this reduced precision. Quantization is performed per-axis, meaning a different scale and zero point
+is used for each channel of a layer's weights.
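+
+The affine mapping between real and quantized values can be sketched as follows (a simplified per-tensor illustration
+of the scheme described in the specification linked above; the function names are placeholders):
+
+```python
+import numpy as np
+
+def quantize(real, scale, zero_point):
+    """Map float values to int8 using real = (q - zero_point) * scale."""
+    q = np.round(real / scale) + zero_point
+    return np.clip(q, -128, 127).astype(np.int8)
+
+def dequantize(q, scale, zero_point):
+    """Recover approximate float values from int8."""
+    return (q.astype(np.float32) - zero_point) * scale
+```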
+
+### Post-training quantization
+
+The first of the quantization methods that will be covered is called post-training quantization. As the name suggests,
+this form of quantization takes place after training of your model is complete. It is also the simpler of the methods
+we will show to quantize a model.
+
+In post-training quantization, first the weights of the model are quantized to 8bit integer values. After this we
+quantize the variable tensors, such as layer activations. To do this we need to calculate the potential range of values
+that all these tensors can take.
+
+Calculating these ranges requires a small dataset that is representative of what you expect your model to see when
+it is deployed. Model inference is then performed using this representative dataset and the resulting minimum and
+maximum values for variable tensors are calculated.
+
+Only a small number of samples need to be used in this calibration dataset (around 100-500 should be enough). These
+samples can be taken from the training or validation sets.
+
+Depending on the model, quantizing it can result in an accuracy drop. However, for many use cases the accuracy drop
+from post-training quantization is minimal. After post-training quantization is complete, you will have a fully
+quantized TensorFlow Lite model.
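+
+In outline, the conversion flow used by the accompanying `post_training_quantization.py` script looks like the
+following sketch, where `keras_model` and `calibration_samples` stand in for your own trained model and
+representative data:
+
+```python
+import numpy as np
+import tensorflow as tf
+
+converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+converter.optimizations = [tf.lite.Optimize.DEFAULT]
+converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+
+def representative_dataset():
+    # Around 100-500 samples taken from the training or validation sets.
+    for sample in calibration_samples:
+        yield [sample[np.newaxis, ...].astype(np.float32)]
+
+converter.representative_dataset = representative_dataset
+tflite_model = converter.convert()
+```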
+
+If you are targeting an Arm Ethos-U55 NPU, then the output TensorFlow Lite file will also need to be passed through the Vela
+compiler for further optimizations before it can be used.
+
+### Quantization aware training
+
+Depending on the model, the use of post-training quantization can result in an accuracy drop that is too large to be
+acceptable. This is where quantization aware training can help. Quantization aware training simulates the
+quantization of weights and activations during the inference stage of training, using fake quantization nodes.
+
+By simulating quantization during training, the model weights are adjusted in the backward pass so that they are
+better suited to the reduced precision of quantization. It is this combination of simulated quantization and weight
+adjustment that minimizes the accuracy loss incurred when quantizing. Note that quantization is only simulated at
+this stage; the backward passes of training are still performed in full floating-point precision.
+
+Importantly, with quantization aware training you do not have to train your model from scratch to use it. Instead, you
+can train it normally (not quantization aware) and after training is complete you can then fine-tune it using
+quantization aware training. By only fine-tuning you can save a lot of training time.
+
+As well as simulating quantization and adjusting weights, the ranges of the variable tensors are captured so that the
+model can be fully quantized afterwards. Once quantization aware training is finished, the TensorFlow Lite converter
+is used to produce a fully quantized TensorFlow Lite model.
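+
+The accompanying `quantization_aware_training.py` script follows essentially this pattern; in the sketch below,
+`model`, `x_train` and `y_train` stand in for your own trained model and data:
+
+```python
+import tensorflow as tf
+import tensorflow_model_optimization as tfmot
+
+# Wrap the trained model with fake quantization nodes, then fine-tune it.
+quant_aware_model = tfmot.quantization.keras.quantize_model(model)
+quant_aware_model.compile(optimizer='adam',
+                          loss='sparse_categorical_crossentropy',
+                          metrics=['accuracy'])
+quant_aware_model.fit(x_train, y_train, epochs=1)
+
+# The ranges captured during training mean that converting with default
+# optimizations already produces a fully quantized model.
+converter = tf.lite.TFLiteConverter.from_keras_model(quant_aware_model)
+converter.optimizations = [tf.lite.Optimize.DEFAULT]
+tflite_model = converter.convert()
+```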
+
+If you are targeting an Arm Ethos-U55 NPU, then the output TensorFlow Lite file will also need to be passed through the Vela
+compiler for further optimizations before it can be used.
+
+## Weight pruning
+
+After you have trained your deep learning model, it is common to find that many of the weights in the model are 0 or
+very close to 0. These weights have very little effect on network calculations, so they can safely be removed, or
+'pruned', from the model. This is accomplished by setting all of these weight values to 0, resulting in a sparse
+model.
+
+Compression algorithms can then take advantage of this to reduce model size in memory, which can be very important when
+deploying on small embedded systems. Moreover, Arm Ethos NPU can take advantage of model sparsity to further accelerate
+execution of a model.
+
+Training with weight pruning forces your model to have a certain percentage of its weights set (or 'pruned') to 0
+during the training phase. This is done by forcing the weights that are closest to 0 to become exactly 0. Pruning
+during training guarantees that your model will have a certain level of sparsity, and also lets the remaining weights
+adapt to the chosen sparsity level. This means that accuracy loss should be minimized even when a large pruning
+percentage is desired.
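+
+The accompanying `weight_pruning.py` script applies pruning as a fine-tuning step, roughly as in the sketch below
+(`trained_model`, `x_train` and `y_train` stand in for your own model and data):
+
+```python
+import tensorflow_model_optimization as tfmot
+
+# Keep sparsity at a constant 75% throughout fine-tuning.
+pruning_schedule = tfmot.sparsity.keras.ConstantSparsity(target_sparsity=0.75, begin_step=0)
+pruned_model = tfmot.sparsity.keras.prune_low_magnitude(trained_model, pruning_schedule=pruning_schedule)
+pruned_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
+
+# The UpdatePruningStep callback must be passed to fit() for pruning to work.
+pruned_model.fit(x_train, y_train, epochs=1, callbacks=[tfmot.sparsity.keras.UpdatePruningStep()])
+
+# Remove the training-only pruning variables before converting to TensorFlow Lite.
+model_for_export = tfmot.sparsity.keras.strip_pruning(pruned_model)
+```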
+
+Weight pruning can be further combined with quantization, giving a model that is both pruned and quantized so that
+the memory-saving effects of both can be combined. Quantization then allows the model to be used with the
+Arm Ethos NPU.
+
+If you are targeting an Arm Ethos-U55 NPU, then the output TensorFlow Lite file will also need to be passed through the Vela
+compiler for further optimizations before it can be used.
+
+## Weight clustering
+
+Another method of model conditioning is weight clustering (also called weight sharing). With this technique, a fixed
+number of values (cluster centers) are used in each layer of a model to represent all the possible values that the
+layer's weights take. The weights in a layer then use the value of their closest cluster center. By restricting the
+weights to a small set of shared values, weight clustering reduces the amount of memory needed to store all the
+weight values in a model.
+
+Depending on the model and the number of clusters chosen, using this kind of technique can have a negative effect on
+accuracy. To reduce the impact on accuracy, you can introduce clustering during training so the model's weights can
+be better adjusted to the reduced precision.
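+
+The accompanying `weight_clustering.py` script introduces clustering as a fine-tuning step, roughly as follows
+(`trained_model`, `x_train` and `y_train` stand in for your own model and data):
+
+```python
+import tensorflow_model_optimization as tfmot
+
+# Restrict each layer's weights to 16 shared values, with linearly spaced initial centroids.
+clustered_model = tfmot.clustering.keras.cluster_weights(
+    trained_model,
+    number_of_clusters=16,
+    cluster_centroids_init=tfmot.clustering.keras.CentroidInitialization.LINEAR)
+clustered_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
+clustered_model.fit(x_train, y_train, epochs=1)
+
+# Strip the clustering wrappers before converting to TensorFlow Lite.
+model_for_export = tfmot.clustering.keras.strip_clustering(clustered_model)
+```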
+
+Weight clustering can be further combined with quantization, giving a model that is both clustered and quantized so
+that the memory-saving effects of both can be combined. Quantization then allows the model to be used with the
+Arm Ethos NPU.
+
+If you are targeting an Arm Ethos-U55 NPU, then the output TensorFlow Lite file will also need to be passed through the Vela
+compiler for further optimizations before it can be used (see [Optimize model with Vela compiler](./building.md#optimize-custom-model-with-vela-compiler)).
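+
+As a rough illustration of the Vela step (the exact options depend on your target configuration, so check the Vela
+documentation linked in the references), the compiler is invoked on a quantized TensorFlow Lite file like so:
+
+```commandline
+vela post_training_quant_model.tflite --accelerator-config=ethos-u55-128
+```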
+
+## References
+
+- [TensorFlow Model Optimization Toolkit](https://www.tensorflow.org/model_optimization)
+- [Post-training quantization](https://www.tensorflow.org/lite/performance/post_training_integer_quant)
+- [Quantization aware training](https://www.tensorflow.org/model_optimization/guide/quantization/training)
+- [Weight pruning](https://www.tensorflow.org/model_optimization/guide/pruning)
+- [Weight clustering](https://www.tensorflow.org/model_optimization/guide/clustering)
+- [Vela](https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/about/)
diff --git a/model_conditioning_examples/post_training_quantization.py b/model_conditioning_examples/post_training_quantization.py
new file mode 100644
index 0000000..ab535ac
--- /dev/null
+++ b/model_conditioning_examples/post_training_quantization.py
@@ -0,0 +1,139 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""
+This script will provide you with an example of how to perform post-training quantization in TensorFlow.
+
+The output from this example will be a TensorFlow Lite model file where weights and activations are quantized to 8bit
+integer values.
+
+Quantization helps reduce the size of your models and is necessary for running models on certain hardware such as Arm
+Ethos NPU.
+
+In addition to quantizing weights, post-training quantization uses a calibration dataset to
+capture the minimum and maximum values of all variable tensors in your model.
+By capturing these ranges it is possible to fully quantize not just the weights of the model but also the activations.
+
+Depending on the model you are quantizing there may be some accuracy loss, but for a lot of models the loss should
+be minimal.
+
+If you are targeting an Arm Ethos-U55 NPU, then the output TensorFlow Lite file will also need to be passed through the Vela
+compiler for further optimizations before it can be used.
+
+For more information on using Vela see: https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/about/
+For more information on post-training quantization
+see: https://www.tensorflow.org/lite/performance/post_training_integer_quant
+"""
+import pathlib
+
+import numpy as np
+import tensorflow as tf
+
+from training_utils import get_data, create_model
+
+
+def post_training_quantize(keras_model, sample_data):
+    """Quantize Keras model using post-training quantization with some sample data.
+
+    The resulting TensorFlow Lite model will have fp32 inputs/outputs and will handle quantizing/dequantizing internally.
+
+    Args:
+        keras_model: Keras model to quantize.
+        sample_data: A numpy array of data to use as a representative dataset.
+
+    Returns:
+        Quantized TensorFlow Lite model.
+    """
+
+    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+
+    # We set the following converter options to ensure our model is fully quantized.
+    # An error will be thrown if there are any ops that cannot be quantized.
+    converter.optimizations = [tf.lite.Optimize.DEFAULT]
+    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+
+    # To use post training quantization we must provide some sample data that will be used to
+    # calculate activation ranges for quantization. This data should be representative of the data
+    # we expect to feed the model and must be provided by a generator function.
+    def generate_repr_dataset():
+        for i in range(100):  # 100 samples is all we should need in this example.
+            yield [np.expand_dims(sample_data[i], axis=0)]
+
+    converter.representative_dataset = generate_repr_dataset
+    tflite_model = converter.convert()
+
+    return tflite_model
+
+
+def evaluate_tflite_model(tflite_save_path, x_test, y_test):
+    """Calculate the accuracy of a TensorFlow Lite model using TensorFlow Lite interpreter.
+
+    Args:
+        tflite_save_path: Path to TensorFlow Lite model to test.
+        x_test: numpy array of testing data.
+        y_test: numpy array of testing labels (sparse categorical).
+    """
+
+    interpreter = tf.lite.Interpreter(model_path=str(tflite_save_path))
+
+    interpreter.allocate_tensors()
+    input_details = interpreter.get_input_details()
+    output_details = interpreter.get_output_details()
+
+    accuracy_count = 0
+    num_test_images = len(y_test)
+
+    for i in range(num_test_images):
+        interpreter.set_tensor(input_details[0]['index'], x_test[i][np.newaxis, ...])
+        interpreter.invoke()
+        output_data = interpreter.get_tensor(output_details[0]['index'])
+
+        if np.argmax(output_data) == y_test[i]:
+            accuracy_count += 1
+
+    print(f"Test accuracy quantized: {accuracy_count / num_test_images:.3f}")
+
+
+def main():
+    x_train, y_train, x_test, y_test = get_data()
+    model = create_model()
+
+    # Compile and train the model in fp32 as normal.
+    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                  loss=tf.keras.losses.sparse_categorical_crossentropy,
+                  metrics=['accuracy'])
+
+    model.fit(x=x_train, y=y_train, batch_size=128, epochs=5, verbose=1, shuffle=True)
+
+    # Test the fp32 model accuracy.
+    test_loss, test_acc = model.evaluate(x_test, y_test)
+    print(f"Test accuracy float: {test_acc:.3f}")
+
+    # Quantize and export the resulting TensorFlow Lite model to file.
+    tflite_model = post_training_quantize(model, x_train)
+
+    tflite_models_dir = pathlib.Path('./conditioned_models/')
+    tflite_models_dir.mkdir(exist_ok=True, parents=True)
+
+    quant_model_save_path = tflite_models_dir / 'post_training_quant_model.tflite'
+    with open(quant_model_save_path, 'wb') as f:
+        f.write(tflite_model)
+
+    # Test the quantized model accuracy. Save time by only testing a subset of the whole data.
+    num_test_samples = 1000
+    evaluate_tflite_model(quant_model_save_path, x_test[0:num_test_samples], y_test[0:num_test_samples])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/model_conditioning_examples/quantization_aware_training.py b/model_conditioning_examples/quantization_aware_training.py
new file mode 100644
index 0000000..acb768c
--- /dev/null
+++ b/model_conditioning_examples/quantization_aware_training.py
@@ -0,0 +1,139 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""
+This script will provide you with a short example of how to perform quantization aware training in TensorFlow using the
+TensorFlow Model Optimization Toolkit.
+
+The output from this example will be a TensorFlow Lite model file where weights and activations are quantized to 8bit
+integer values.
+
+Quantization helps reduce the size of your models and is necessary for running models on certain hardware such as Arm
+Ethos NPU.
+
+In quantization aware training (QAT), the error introduced with quantizing from fp32 to int8 is simulated using
+fake quantization nodes. By simulating this quantization error when training, the model can learn better adapted
+weights and minimize accuracy losses caused by the reduced precision.
+
+Minimum and maximum values for activations are also captured during training so activations for every layer can be
+quantized along with the weights later.
+
+Quantization is only simulated during training and the training backward passes are still performed in full float
+precision. Actual quantization happens when generating a TensorFlow Lite model.
+
+If you are targeting an Arm Ethos-U55 NPU, then the output TensorFlow Lite file will also need to be passed through the Vela
+compiler for further optimizations before it can be used.
+
+For more information on using Vela see: https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/about/
+For more information on quantization aware training
+see: https://www.tensorflow.org/model_optimization/guide/quantization/training
+"""
+import pathlib
+
+import numpy as np
+import tensorflow as tf
+import tensorflow_model_optimization as tfmot
+
+from training_utils import get_data, create_model
+
+
+def quantize_and_convert_to_tflite(keras_model):
+    """Quantize and convert Keras model trained with QAT to TensorFlow Lite.
+
+    The resulting TensorFlow Lite model will have fp32 inputs/outputs and will handle quantizing/dequantizing internally.
+
+    Args:
+        keras_model: Keras model trained with quantization aware training.
+
+    Returns:
+        Quantized TensorFlow Lite model.
+    """
+
+    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+
+    # After doing quantization aware training all the information for creating a fully quantized
+    # TensorFlow Lite model is already within the quantization aware Keras model.
+    # This means we only need to call convert with default optimizations to generate the quantized TensorFlow Lite model.
+    converter.optimizations = [tf.lite.Optimize.DEFAULT]
+    tflite_model = converter.convert()
+
+    return tflite_model
+
+
+def evaluate_tflite_model(tflite_save_path, x_test, y_test):
+    """Calculate the accuracy of a TensorFlow Lite model using TensorFlow Lite interpreter.
+
+    Args:
+        tflite_save_path: Path to TensorFlow Lite model to test.
+        x_test: numpy array of testing data.
+        y_test: numpy array of testing labels (sparse categorical).
+    """
+
+    interpreter = tf.lite.Interpreter(model_path=str(tflite_save_path))
+
+    interpreter.allocate_tensors()
+    input_details = interpreter.get_input_details()
+    output_details = interpreter.get_output_details()
+
+    accuracy_count = 0
+    num_test_images = len(y_test)
+
+    for i in range(num_test_images):
+        interpreter.set_tensor(input_details[0]['index'], x_test[i][np.newaxis, ...])
+        interpreter.invoke()
+        output_data = interpreter.get_tensor(output_details[0]['index'])
+
+        if np.argmax(output_data) == y_test[i]:
+            accuracy_count += 1
+
+    print(f"Test accuracy quantized: {accuracy_count / num_test_images:.3f}")
+
+
+def main():
+    x_train, y_train, x_test, y_test = get_data()
+    model = create_model()
+
+    # When working with the TensorFlow Keras API and the TF Model Optimization Toolkit we can make our
+    # model quantization aware in one line. Once this is done we compile the model and train as normal.
+    # It is important to note that the model is only quantization aware and is not quantized yet. The weights are
+    # still floating point and will only be converted to int8 when we generate the TensorFlow Lite model later on.
+    quant_aware_model = tfmot.quantization.keras.quantize_model(model)
+
+    quant_aware_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                              loss=tf.keras.losses.sparse_categorical_crossentropy,
+                              metrics=['accuracy'])
+
+    quant_aware_model.fit(x=x_train, y=y_train, batch_size=128, epochs=5, verbose=1, shuffle=True)
+
+    # Test the quantization aware model accuracy.
+    test_loss, test_acc = quant_aware_model.evaluate(x_test, y_test)
+    print(f"Test accuracy quant aware: {test_acc:.3f}")
+
+    # Quantize and save the resulting TensorFlow Lite model to file.
+    tflite_model = quantize_and_convert_to_tflite(quant_aware_model)
+
+    tflite_models_dir = pathlib.Path('./conditioned_models/')
+    tflite_models_dir.mkdir(exist_ok=True, parents=True)
+
+    quant_model_save_path = tflite_models_dir / 'qat_quant_model.tflite'
+    with open(quant_model_save_path, 'wb') as f:
+        f.write(tflite_model)
+
+    # Test quantized model accuracy. Save time by only testing a subset of the whole data.
+    num_test_samples = 1000
+    evaluate_tflite_model(quant_model_save_path, x_test[0:num_test_samples], y_test[0:num_test_samples])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/model_conditioning_examples/requirements.txt b/model_conditioning_examples/requirements.txt
new file mode 100644
index 0000000..96e15a3
--- /dev/null
+++ b/model_conditioning_examples/requirements.txt
@@ -0,0 +1,3 @@
+tensorflow==2.4.0
+tensorflow-model-optimization==0.5.0
+numpy==1.19.5
\ No newline at end of file
diff --git a/model_conditioning_examples/setup.sh b/model_conditioning_examples/setup.sh
new file mode 100644
index 0000000..f552662
--- /dev/null
+++ b/model_conditioning_examples/setup.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+python3 -m venv ./env
+source ./env/bin/activate
+pip install -U pip
+pip install -r requirements.txt
\ No newline at end of file
diff --git a/model_conditioning_examples/training_utils.py b/model_conditioning_examples/training_utils.py
new file mode 100644
index 0000000..3467b2a
--- /dev/null
+++ b/model_conditioning_examples/training_utils.py
@@ -0,0 +1,61 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""
+Utility functions related to data and models that are common to all the model conditioning examples.
+"""
+import tensorflow as tf
+import numpy as np
+
+
+def get_data():
+    """Downloads and returns the pre-processed data and labels for training and testing.
+
+    Returns:
+        Tuple of: (train data, train labels, test data, test labels)
+    """
+
+    # To save time we use the MNIST dataset for this example.
+    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
+
+    # Convolution operations require data to have 4 dimensions.
+    # We divide by 255 to normalize the pixel values to [0, 1], which helps training, and cast to float32 for TensorFlow.
+    x_train = (x_train[..., np.newaxis] / 255.0).astype(np.float32)
+    x_test = (x_test[..., np.newaxis] / 255.0).astype(np.float32)
+
+    return x_train, y_train, x_test, y_test
+
+
+def create_model():
+    """Create and returns a simple Keras model for training MNIST.
+
+    We will use a simple convolutional neural network for this example,
+    but the model optimization methods employed should be compatible with
+    a wide variety of CNN architectures, such as MobileNet and Inception.
+
+    Returns:
+        Uncompiled Keras model.
+    """
+
+    keras_model = tf.keras.models.Sequential([
+        tf.keras.layers.Conv2D(32, 3, padding='same', input_shape=(28, 28, 1), activation=tf.nn.relu),
+        tf.keras.layers.Conv2D(32, 3, padding='same', activation=tf.nn.relu),
+        tf.keras.layers.MaxPool2D(),
+        tf.keras.layers.Conv2D(32, 3, padding='same', activation=tf.nn.relu),
+        tf.keras.layers.MaxPool2D(),
+        tf.keras.layers.Flatten(),
+        tf.keras.layers.Dense(units=10, activation=tf.nn.softmax)
+    ])
+
+    return keras_model
diff --git a/model_conditioning_examples/weight_clustering.py b/model_conditioning_examples/weight_clustering.py
new file mode 100644
index 0000000..54f241c
--- /dev/null
+++ b/model_conditioning_examples/weight_clustering.py
@@ -0,0 +1,107 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""
+This script will provide you with a short example of how to perform clustering of weights (weight sharing) in
+TensorFlow using the TensorFlow Model Optimization Toolkit.
+
+The output from this example will be a TensorFlow Lite model file where weights in each layer have been 'clustered' into
+16 clusters during training - quantization has then been applied on top of this.
+
+By clustering the model we can improve compression of the model file. This can be essential for deploying certain
+models on systems with limited resources - such as embedded systems using an Arm Ethos NPU.
+
+After performing clustering we do post-training quantization to quantize the model and then generate a TensorFlow Lite file.
+
+If you are targeting an Arm Ethos-U55 NPU, then the output TensorFlow Lite file will also need to be passed through the Vela
+compiler for further optimizations before it can be used.
+
+For more information on using Vela see: https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/about/
+For more information on clustering see: https://www.tensorflow.org/model_optimization/guide/clustering
+"""
+import pathlib
+
+import tensorflow as tf
+import tensorflow_model_optimization as tfmot
+
+from training_utils import get_data, create_model
+from post_training_quantization import post_training_quantize, evaluate_tflite_model
+
+
+def prepare_for_clustering(keras_model):
+    """Prepares a Keras model for clustering."""
+
+    # Choose the number of clusters to use and how to initialize them. Using fewer clusters will generally
+    # improve compression but reduce accuracy, so you will need to find the optimal number for your use case.
+    number_of_clusters = 16
+    cluster_centroids_init = tfmot.clustering.keras.CentroidInitialization.LINEAR
+
+    # Apply the clustering wrapper to the whole model so weights in every layer will get clustered. You may find that
+    # to avoid too much accuracy loss only certain non-critical layers in your model should be clustered.
+    clustering_ready_model = tfmot.clustering.keras.cluster_weights(keras_model,
+                                                                    number_of_clusters=number_of_clusters,
+                                                                    cluster_centroids_init=cluster_centroids_init)
+
+    # We must recompile the model after making it ready for clustering.
+    clustering_ready_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                                   loss=tf.keras.losses.sparse_categorical_crossentropy,
+                                   metrics=['accuracy'])
+
+    return clustering_ready_model
+
+
+def main():
+    x_train, y_train, x_test, y_test = get_data()
+    model = create_model()
+
+    # Compile and train the model first.
+    # In general it is easier to do clustering as a fine-tuning step after the model is fully trained.
+    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                  loss=tf.keras.losses.sparse_categorical_crossentropy,
+                  metrics=['accuracy'])
+
+    model.fit(x=x_train, y=y_train, batch_size=128, epochs=5, verbose=1, shuffle=True)
+
+    # Test the trained model accuracy.
+    test_loss, test_acc = model.evaluate(x_test, y_test)
+    print(f"Test accuracy before clustering: {test_acc:.3f}")
+
+    # Prepare the model for clustering.
+    clustered_model = prepare_for_clustering(model)
+
+    # Continue training the model but now with clustering applied.
+    clustered_model.fit(x=x_train, y=y_train, batch_size=128, epochs=1, verbose=1, shuffle=True)
+    test_loss, test_acc = clustered_model.evaluate(x_test, y_test)
+    print(f"Test accuracy after clustering: {test_acc:.3f}")
+
+    # Remove all variables that clustering only needed in the training phase.
+    model_for_export = tfmot.clustering.keras.strip_clustering(clustered_model)
+
+    # Apply post-training quantization on top of the clustering and save the resulting TensorFlow Lite model to file.
+    tflite_model = post_training_quantize(model_for_export, x_train)
+
+    tflite_models_dir = pathlib.Path('./conditioned_models/')
+    tflite_models_dir.mkdir(exist_ok=True, parents=True)
+
+    clustered_quant_model_save_path = tflite_models_dir / 'clustered_post_training_quant_model.tflite'
+    with open(clustered_quant_model_save_path, 'wb') as f:
+        f.write(tflite_model)
+
+    # Test the clustered quantized model accuracy. Save time by only testing a subset of the whole data.
+    num_test_samples = 1000
+    evaluate_tflite_model(clustered_quant_model_save_path, x_test[0:num_test_samples], y_test[0:num_test_samples])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/model_conditioning_examples/weight_pruning.py b/model_conditioning_examples/weight_pruning.py
new file mode 100644
index 0000000..bf26f1f
--- /dev/null
+++ b/model_conditioning_examples/weight_pruning.py
@@ -0,0 +1,106 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""
+This script will provide you with a short example of how to perform magnitude-based weight pruning in TensorFlow
+using the TensorFlow Model Optimization Toolkit.
+
+The output from this example will be a TensorFlow Lite model file where ~75% of the weights have been 'pruned' to the
+value 0 during training - quantization has then been applied on top of this.
+
+By pruning the model we can improve compression of the model file. This can be essential for deploying certain models
+on systems with limited resources - such as embedded systems using Arm Ethos NPU. Also, if the pruned model is run
+on an Arm Ethos NPU then this pruning can improve the execution time of the model.
+
+After pruning is complete we do post-training quantization to quantize the model and then generate a TensorFlow Lite file.
+
+If you are targeting an Arm Ethos-U55 NPU, then the output TensorFlow Lite file will also need to be passed through the Vela
+compiler for further optimizations before it can be used.
+
+For more information on using Vela see: https://git.mlplatform.org/ml/ethos-u/ethos-u-vela.git/about/
+For more information on weight pruning see: https://www.tensorflow.org/model_optimization/guide/pruning
+"""
+import pathlib
+
+import tensorflow as tf
+import tensorflow_model_optimization as tfmot
+
+from training_utils import get_data, create_model
+from post_training_quantization import post_training_quantize, evaluate_tflite_model
+
+
+def prepare_for_pruning(keras_model):
+    """Prepares a Keras model for pruning."""
+
+    # We use a constant sparsity schedule so the amount of sparsity in the model is kept at the same percent throughout
+    # training. An alternative is PolynomialDecay where sparsity can be gradually increased during training.
+    pruning_schedule = tfmot.sparsity.keras.ConstantSparsity(target_sparsity=0.75, begin_step=0)
+
+    # Apply the pruning wrapper to the whole model so weights in every layer will get pruned. You may find that to avoid
+    # too much accuracy loss only certain non-critical layers in your model should be pruned.
+    pruning_ready_model = tfmot.sparsity.keras.prune_low_magnitude(keras_model, pruning_schedule=pruning_schedule)
+
+    # We must recompile the model after making it ready for pruning.
+    pruning_ready_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                                loss=tf.keras.losses.sparse_categorical_crossentropy,
+                                metrics=['accuracy'])
+
+    return pruning_ready_model
+
+
+def main():
+    x_train, y_train, x_test, y_test = get_data()
+    model = create_model()
+
+    # Compile and train the model first.
+    # In general it is easier to do pruning as a fine-tuning step after the model is fully trained.
+    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+                  loss=tf.keras.losses.sparse_categorical_crossentropy,
+                  metrics=['accuracy'])
+
+    model.fit(x=x_train, y=y_train, batch_size=128, epochs=5, verbose=1, shuffle=True)
+
+    # Test the trained model accuracy.
+    test_loss, test_acc = model.evaluate(x_test, y_test)
+    print(f"Test accuracy before pruning: {test_acc:.3f}")
+
+    # Prepare the model for pruning and add the pruning update callback needed in training.
+    pruned_model = prepare_for_pruning(model)
+    callbacks = [tfmot.sparsity.keras.UpdatePruningStep()]
+
+    # Continue training the model but now with pruning applied - remember to pass in the callbacks!
+    pruned_model.fit(x=x_train, y=y_train, batch_size=128, epochs=1, verbose=1, shuffle=True, callbacks=callbacks)
+    test_loss, test_acc = pruned_model.evaluate(x_test, y_test)
+    print(f"Test accuracy after pruning: {test_acc:.3f}")
+
+    # Remove all variables that pruning only needed in the training phase.
+    model_for_export = tfmot.sparsity.keras.strip_pruning(pruned_model)
+
+    # Apply post-training quantization on top of the pruning and save the resulting TensorFlow Lite model to file.
+    tflite_model = post_training_quantize(model_for_export, x_train)
+
+    tflite_models_dir = pathlib.Path('./conditioned_models/')
+    tflite_models_dir.mkdir(exist_ok=True, parents=True)
+
+    pruned_quant_model_save_path = tflite_models_dir / 'pruned_post_training_quant_model.tflite'
+    with open(pruned_quant_model_save_path, 'wb') as f:
+        f.write(tflite_model)
+
+    # Test the pruned quantized model accuracy. Save time by only testing a subset of the whole data.
+    num_test_samples = 1000
+    evaluate_tflite_model(pruned_quant_model_save_path, x_test[0:num_test_samples], y_test[0:num_test_samples])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/release_notes.txt b/release_notes.txt
new file mode 100644
index 0000000..13c40a7
--- /dev/null
+++ b/release_notes.txt
@@ -0,0 +1,37 @@
+Changes in 21.03
+    * Simple platform support added.
+    * Model conditioning examples added.
+    * Documentation updated.
+    * Build changed to use sources of the dependency libraries.
+    * Tests for native platform added.
+    * Anomaly detection use case added.
+
+Changes in 20.11
+    * SSE-200 and SSE-300 system support was added.
+    * Support for simple fixed virtual platform for Ethos-U55 and Cortex-M55 removed.
+    * Build cmake parameters changed: TARGET_SUBSYSTEM was added, TARGET_PLATFORM accepted values were changed.
+    * Models with multiple output tensors support was added.
+    * Generic inference runner use-case added.
+    * ASR triggered by KWS added in the same use case (kws_asr). This also shows how to re-use tensor arena with two models using the same memory pool.
+
+Changes in 20.09 release:
+    * Support for TensorFlow Lite Micro version > 2.3.0 (tested with TensorFlow Lite Micro 2.4.0 commit hash: 5bbb8a2bd1def6865b1510175a3da5fd12387e10)
+    * Added speech recognition use case example.
+    * Updated Ethos-U55 Fastmodel version to r0p2-00eac0-rc4
+
+Changes in 20.08 release:
+    * Added keyword spotting use case example.
+    * Added person detection use case example.
+Known issues:
+    * telnet connection to FastModel environment may hang after some period of inactivity.
+
+Changes in 20.05 release:
+    * FastModel environment was built with FastModel Tools v11.10.22.
+    * Mps3 FPGA build support was added.
+    * Configurable timing-adaptor.
+    * Added Active and Idle cycle counts for NPU and CPU profiling report.
+    * Source code structure and build scripts refactored to support multiple ML use-cases.
+    * Used EAC Ethos-U55 software model and drivers.
+    * Windows support for build scripts.
+Known issues:
+    * telnet connection to FastModel environment may hang after some period of inactivity.
\ No newline at end of file
diff --git a/resources/LICENSE_CC_1.0.txt b/resources/LICENSE_CC_1.0.txt
new file mode 100644
index 0000000..d727803
--- /dev/null
+++ b/resources/LICENSE_CC_1.0.txt
@@ -0,0 +1,51 @@
+Creative Commons Attribution 1.0
+
+=======================================================================
+
+CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DRAFT LICENSE DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM ITS USE.
+
+License
+
+THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE IS PROHIBITED.
+
+BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
+
+    1. Definitions
+        a. "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License.
+        b. "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License.
+        c. "Licensor" means the individual or entity that offers the Work under the terms of this License.
+        d. "Original Author" means the individual or entity who created the Work.
+        e. "Work" means the copyrightable work of authorship offered under the terms of this License.
+        f. "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation.
+    2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws.
+    3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below:
+        a. to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works;
+        b. to create and reproduce Derivative Works;
+        c. to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works;
+        d. to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works;
+
+        The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved.
+    4. Restrictions. The license granted in Section 3 above is expressly made subject to and limited by the following restrictions:
+        a. You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any reference to such Licensor or the Original Author, as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any reference to such Licensor or the Original Author, as requested.
+        b. If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and give the Original Author credit reasonable to the medium or means You are utilizing by conveying the name (or pseudonym if applicable) of the Original Author if supplied; the title of the Work if supplied; in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit.
+    5. Representations, Warranties and Disclaimer
+        a. By offering the Work for public release under this License, Licensor represents and warrants that, to the best of Licensor's knowledge after reasonable inquiry:
+            i. Licensor has secured all rights in the Work necessary to grant the license rights hereunder and to permit the lawful exercise of the rights granted hereunder without You having any obligation to pay any royalties, compulsory license fees, residuals or any other payments;
+            ii. The Work does not infringe the copyright, trademark, publicity rights, common law rights or any other right of any third party or constitute defamation, invasion of privacy or other tortious injury to any third party.
+        b. EXCEPT AS EXPRESSLY STATED IN THIS LICENSE OR OTHERWISE AGREED IN WRITING OR REQUIRED BY APPLICABLE LAW, THE WORK IS LICENSED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES REGARDING THE CONTENTS OR ACCURACY OF THE WORK.
+    6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, AND EXCEPT FOR DAMAGES ARISING FROM LIABILITY TO A THIRD PARTY RESULTING FROM BREACH OF THE WARRANTIES IN SECTION 5, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+    7. Termination
+        a. This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License.
+        b. Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above.
+    8. Miscellaneous
+        a. Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License.
+        b. Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License.
+        c. If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
+        d. No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent.
+        e. This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You.
+
+Creative Commons is not a party to this License, and makes no warranty whatsoever in connection with the Work. Creative Commons will not be liable to You or any party on any legal theory for any damages whatsoever, including without limitation any general, special, incidental or consequential damages arising in connection to this license. Notwithstanding the foregoing two (2) sentences, if Creative Commons has expressly identified itself as the Licensor hereunder, it shall have all rights and obligations of Licensor.
+
+Except for the limited purpose of indicating to the public that the Work is licensed under the CCPL, neither party will use the trademark "Creative Commons" or any related trademark or logo of Creative Commons without the prior written consent of Creative Commons. Any permitted use will be in compliance with Creative Commons' then-current trademark usage guidelines, as may be published on its website or otherwise made available upon request from time to time.
+
+Creative Commons may be contacted at http://creativecommons.org/.
diff --git a/resources/LICENSE_CC_4.0.txt b/resources/LICENSE_CC_4.0.txt
new file mode 100644
index 0000000..b877574
--- /dev/null
+++ b/resources/LICENSE_CC_4.0.txt
@@ -0,0 +1,397 @@
+Creative Commons
+
+Attribution 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+     Considerations for licensors: Our public licenses are
+     intended for use by those authorized to give the public
+     permission to use material in ways otherwise restricted by
+     copyright and certain other rights. Our licenses are
+     irrevocable. Licensors should read and understand the terms
+     and conditions of the license they choose before applying it.
+     Licensors should also secure all rights necessary before
+     applying our licenses so that the public can reuse the
+     material as expected. Licensors should clearly mark any
+     material not subject to the license. This includes other CC-
+     licensed material, or material used under an exception or
+     limitation to copyright. More considerations for licensors:
+   wiki.creativecommons.org/Considerations_for_licensors
+
+     Considerations for the public: By using one of our public
+     licenses, a licensor grants the public permission to use the
+     licensed material under specified terms and conditions. If
+     the licensor's permission is not necessary for any reason--for
+     example, because of any applicable exception or limitation to
+     copyright--then that use is not regulated by the license. Our
+     licenses grant only permissions under copyright and certain
+     other rights that a licensor has authority to grant. Use of
+     the licensed material may still be restricted for other
+     reasons, including because others have copyright or other
+     rights in the material. A licensor may make special requests,
+     such as asking that all changes be marked or described.
+     Although not required by our licenses, you are encouraged to
+     respect those requests where reasonable. More_considerations
+     for the public:
+   wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution 4.0 International Public License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution 4.0 International Public License ("Public License"). To the
+extent this Public License may be interpreted as a contract, You are
+granted the Licensed Rights in consideration of Your acceptance of
+these terms and conditions, and the Licensor grants You such rights in
+consideration of benefits the Licensor receives from making the
+Licensed Material available under these terms and conditions.
+
+
+Section 1 -- Definitions.
+
+  a. Adapted Material means material subject to Copyright and Similar
+     Rights that is derived from or based upon the Licensed Material
+     and in which the Licensed Material is translated, altered,
+     arranged, transformed, or otherwise modified in a manner requiring
+     permission under the Copyright and Similar Rights held by the
+     Licensor. For purposes of this Public License, where the Licensed
+     Material is a musical work, performance, or sound recording,
+     Adapted Material is always produced where the Licensed Material is
+     synched in timed relation with a moving image.
+
+  b. Adapter's License means the license You apply to Your Copyright
+     and Similar Rights in Your contributions to Adapted Material in
+     accordance with the terms and conditions of this Public License.
+
+  c. Copyright and Similar Rights means copyright and/or similar rights
+     closely related to copyright including, without limitation,
+     performance, broadcast, sound recording, and Sui Generis Database
+     Rights, without regard to how the rights are labeled or
+     categorized. For purposes of this Public License, the rights
+     specified in Section 2(b)(1)-(2) are not Copyright and Similar
+     Rights.
+
+  d. Effective Technological Measures means those measures that, in the
+     absence of proper authority, may not be circumvented under laws
+     fulfilling obligations under Article 11 of the WIPO Copyright
+     Treaty adopted on December 20, 1996, and/or similar international
+     agreements.
+
+  e. Exceptions and Limitations means fair use, fair dealing, and/or
+     any other exception or limitation to Copyright and Similar Rights
+     that applies to Your use of the Licensed Material.
+
+  f. Licensed Material means the artistic or literary work, database,
+     or other material to which the Licensor applied this Public
+     License.
+
+  g. Licensed Rights means the rights granted to You subject to the
+     terms and conditions of this Public License, which are limited to
+     all Copyright and Similar Rights that apply to Your use of the
+     Licensed Material and that the Licensor has authority to license.
+
+  h. Licensor means the individual(s) or entity(ies) granting rights
+     under this Public License.
+
+  i. Share means to provide material to the public by any means or
+     process that requires permission under the Licensed Rights, such
+     as reproduction, public display, public performance, distribution,
+     dissemination, communication, or importation, and to make material
+     available to the public including in ways that members of the
+     public may access the material from a place and at a time
+     individually chosen by them.
+
+  j. Sui Generis Database Rights means rights other than copyright
+     resulting from Directive 96/9/EC of the European Parliament and of
+     the Council of 11 March 1996 on the legal protection of databases,
+     as amended and/or succeeded, as well as other essentially
+     equivalent rights anywhere in the world.
+
+  k. You means the individual or entity exercising the Licensed Rights
+     under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+  a. License grant.
+
+       1. Subject to the terms and conditions of this Public License,
+          the Licensor hereby grants You a worldwide, royalty-free,
+          non-sublicensable, non-exclusive, irrevocable license to
+          exercise the Licensed Rights in the Licensed Material to:
+
+            a. reproduce and Share the Licensed Material, in whole or
+               in part; and
+
+            b. produce, reproduce, and Share Adapted Material.
+
+       2. Exceptions and Limitations. For the avoidance of doubt, where
+          Exceptions and Limitations apply to Your use, this Public
+          License does not apply, and You do not need to comply with
+          its terms and conditions.
+
+       3. Term. The term of this Public License is specified in Section
+          6(a).
+
+       4. Media and formats; technical modifications allowed. The
+          Licensor authorizes You to exercise the Licensed Rights in
+          all media and formats whether now known or hereafter created,
+          and to make technical modifications necessary to do so. The
+          Licensor waives and/or agrees not to assert any right or
+          authority to forbid You from making technical modifications
+          necessary to exercise the Licensed Rights, including
+          technical modifications necessary to circumvent Effective
+          Technological Measures. For purposes of this Public License,
+          simply making modifications authorized by this Section 2(a)
+          (4) never produces Adapted Material.
+
+       5. Downstream recipients.
+
+            a. Offer from the Licensor -- Licensed Material. Every
+               recipient of the Licensed Material automatically
+               receives an offer from the Licensor to exercise the
+               Licensed Rights under the terms and conditions of this
+               Public License.
+
+            b. No downstream restrictions. You may not offer or impose
+               any additional or different terms or conditions on, or
+               apply any Effective Technological Measures to, the
+               Licensed Material if doing so restricts exercise of the
+               Licensed Rights by any recipient of the Licensed
+               Material.
+
+       6. No endorsement. Nothing in this Public License constitutes or
+          may be construed as permission to assert or imply that You
+          are, or that Your use of the Licensed Material is, connected
+          with, or sponsored, endorsed, or granted official status by,
+          the Licensor or others designated to receive attribution as
+          provided in Section 3(a)(1)(A)(i).
+
+  b. Other rights.
+
+       1. Moral rights, such as the right of integrity, are not
+          licensed under this Public License, nor are publicity,
+          privacy, and/or other similar personality rights; however, to
+          the extent possible, the Licensor waives and/or agrees not to
+          assert any such rights held by the Licensor to the limited
+          extent necessary to allow You to exercise the Licensed
+          Rights, but not otherwise.
+
+       2. Patent and trademark rights are not licensed under this
+          Public License.
+
+       3. To the extent possible, the Licensor waives any right to
+          collect royalties from You for the exercise of the Licensed
+          Rights, whether directly or through a collecting society
+          under any voluntary or waivable statutory or compulsory
+          licensing scheme. In all other cases the Licensor expressly
+          reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+  a. Attribution.
+
+       1. If You Share the Licensed Material (including in modified
+          form), You must:
+
+            a. retain the following if it is supplied by the Licensor
+               with the Licensed Material:
+
+                 i. identification of the creator(s) of the Licensed
+                    Material and any others designated to receive
+                    attribution, in any reasonable manner requested by
+                    the Licensor (including by pseudonym if
+                    designated);
+
+                ii. a copyright notice;
+
+               iii. a notice that refers to this Public License;
+
+                iv. a notice that refers to the disclaimer of
+                    warranties;
+
+                 v. a URI or hyperlink to the Licensed Material to the
+                    extent reasonably practicable;
+
+            b. indicate if You modified the Licensed Material and
+               retain an indication of any previous modifications; and
+
+            c. indicate the Licensed Material is licensed under this
+               Public License, and include the text of, or the URI or
+               hyperlink to, this Public License.
+
+       2. You may satisfy the conditions in Section 3(a)(1) in any
+          reasonable manner based on the medium, means, and context in
+          which You Share the Licensed Material. For example, it may be
+          reasonable to satisfy the conditions by providing a URI or
+          hyperlink to a resource that includes the required
+          information.
+
+       3. If requested by the Licensor, You must remove any of the
+          information required by Section 3(a)(1)(A) to the extent
+          reasonably practicable.
+
+       4. If You Share Adapted Material You produce, the Adapter's
+          License You apply must not prevent recipients of the Adapted
+          Material from complying with this Public License.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+  a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+     to extract, reuse, reproduce, and Share all or a substantial
+     portion of the contents of the database;
+
+  b. if You include all or a substantial portion of the database
+     contents in a database in which You have Sui Generis Database
+     Rights, then the database in which You have Sui Generis Database
+     Rights (but not its individual contents) is Adapted Material; and
+
+  c. You must comply with the conditions in Section 3(a) if You Share
+     all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+  a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+     EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+     AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+     ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+     IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+     WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+     PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+     ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+     KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+     ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+  b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+     TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+     NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+     INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+     COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+     USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+     ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+     DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+     IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+  c. The disclaimer of warranties and limitation of liability provided
+     above shall be interpreted in a manner that, to the extent
+     possible, most closely approximates an absolute disclaimer and
+     waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+  a. This Public License applies for the term of the Copyright and
+     Similar Rights licensed here. However, if You fail to comply with
+     this Public License, then Your rights under this Public License
+     terminate automatically.
+
+  b. Where Your right to use the Licensed Material has terminated under
+     Section 6(a), it reinstates:
+
+       1. automatically as of the date the violation is cured, provided
+          it is cured within 30 days of Your discovery of the
+          violation; or
+
+       2. upon express reinstatement by the Licensor.
+
+     For the avoidance of doubt, this Section 6(b) does not affect any
+     right the Licensor may have to seek remedies for Your violations
+     of this Public License.
+
+  c. For the avoidance of doubt, the Licensor may also offer the
+     Licensed Material under separate terms or conditions or stop
+     distributing the Licensed Material at any time; however, doing so
+     will not terminate this Public License.
+
+  d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+     License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+  a. The Licensor shall not be bound by any additional or different
+     terms or conditions communicated by You unless expressly agreed.
+
+  b. Any arrangements, understandings, or agreements regarding the
+     Licensed Material not stated herein are separate from and
+     independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+  a. For the avoidance of doubt, this Public License does not, and
+     shall not be interpreted to, reduce, limit, restrict, or impose
+     conditions on any use of the Licensed Material that could lawfully
+     be made without permission under this Public License.
+
+  b. To the extent possible, if any provision of this Public License is
+     deemed unenforceable, it shall be automatically reformed to the
+     minimum extent necessary to make it enforceable. If the provision
+     cannot be reformed, it shall be severed from this Public License
+     without affecting the enforceability of the remaining terms and
+     conditions.
+
+  c. No term or condition of this Public License will be waived and no
+     failure to comply consented to unless expressly agreed to by the
+     Licensor.
+
+  d. Nothing in this Public License constitutes or may be interpreted
+     as a limitation upon, or waiver of, any privileges and immunities
+     that apply to the Licensor or You, including from the legal
+     processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public
+licenses. Notwithstanding, Creative Commons may elect to apply one of
+its public licenses to material it publishes and in those instances
+will be considered the “Licensor.” The text of the Creative Commons
+public licenses is dedicated to the public domain under the CC0 Public
+Domain Dedication. Except for the limited purpose of indicating that
+material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the
+public licenses.
+
+Creative Commons may be contacted at creativecommons.org.
\ No newline at end of file
diff --git a/resources/ad/labels/placeholder.txt b/resources/ad/labels/placeholder.txt
new file mode 100644
index 0000000..b3a4252
--- /dev/null
+++ b/resources/ad/labels/placeholder.txt
@@ -0,0 +1 @@
+placeholder
\ No newline at end of file
diff --git a/resources/ad/samples/files.md b/resources/ad/samples/files.md
new file mode 100644
index 0000000..95b1479
--- /dev/null
+++ b/resources/ad/samples/files.md
@@ -0,0 +1,17 @@
+# Sample wav audio clip
+
+For this use case, sample audio clips are not provided.
+
+The data used for this application sample comes from
+[https://zenodo.org/record/3384388#.X6GILFNKiqA](https://zenodo.org/record/3384388#.X6GILFNKiqA)
+and the model included in this example is trained on the ‘Slider’ part of the dataset.
+
+The machine ID (00, 02, 04, or 06) that the clip comes from must be present in the file name for the application to work.
+
+The file name must follow a pattern such as
+`<any>_<text>_00_<here>.wav` if the audio came from machine ID 00,
+or `<any>_<text>_02_<here>.wav` if it came from machine ID 02, and so on.
+For example:
+
+- `anomaly_id_00_00000000.wav`
+- `normal_id_00_00000004.wav`
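+
+For reference, the snippet below is a minimal Python sketch (hypothetical; not part of the application) of how such a machine ID could be extracted from a file name:
+
+```python
+import re
+
+# Hypothetical helper: matches one of the supported machine IDs between
+# underscores, e.g. "anomaly_id_00_00000000.wav" -> "00".
+MACHINE_ID_RE = re.compile(r"_(00|02|04|06)_")
+
+def machine_id(filename: str) -> str:
+    match = MACHINE_ID_RE.search(filename)
+    if match is None:
+        raise ValueError(f"No machine ID (00/02/04/06) in '{filename}'")
+    return match.group(1)
+
+assert machine_id("anomaly_id_00_00000000.wav") == "00"
+assert machine_id("normal_id_00_00000004.wav") == "00"
+```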
diff --git a/resources/asr/labels/labels_wav2letter.txt b/resources/asr/labels/labels_wav2letter.txt
new file mode 100644
index 0000000..8fb2fc8
--- /dev/null
+++ b/resources/asr/labels/labels_wav2letter.txt
@@ -0,0 +1,29 @@
+a
+b
+c
+d
+e
+f
+g
+h
+i
+j
+k
+l
+m
+n
+o
+p
+q
+r
+s
+t
+u
+v
+w
+x
+y
+z
+'
+ 
+$
\ No newline at end of file
diff --git a/resources/asr/samples/anotherdoor.wav b/resources/asr/samples/anotherdoor.wav
new file mode 100644
index 0000000..ee08f06
--- /dev/null
+++ b/resources/asr/samples/anotherdoor.wav
Binary files differ
diff --git a/resources/asr/samples/anotherengineer.wav b/resources/asr/samples/anotherengineer.wav
new file mode 100644
index 0000000..36faef8
--- /dev/null
+++ b/resources/asr/samples/anotherengineer.wav
Binary files differ
diff --git a/resources/asr/samples/files.md b/resources/asr/samples/files.md
new file mode 100644
index 0000000..03b988b
--- /dev/null
+++ b/resources/asr/samples/files.md
@@ -0,0 +1,17 @@
+# Sample wav audio clip
+
+The sample wav audio clips provided are under the Creative Commons Attribution 4.0 International Public License.
+The source is the LibriSpeech ASR Corpus (http://www.openslr.org/12/); the files were converted from flac to wav. The files used are listed here for traceability:
+
+- testingroutine.wav (orig - 251-137823-0002.flac)
+  - "This isn't part of your testing routine is it"
+- anotherengineer.wav (orig - 251-137823-0003.flac)
+  - "Another engineer rushed toward the door to see what was happening outside"
+- anotherdoor.wav (orig - 3536-23268-0010.flac)
+  - "And he walked immediately out of the apartment by another door"
+- itellyou.wav (orig - 251-118436-0001.flac)
+  - "I tell you it is not poison she cried"
+
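+One way the flac to wav conversion can be reproduced is sketched below in Python; the `soundfile` package is an assumption here (the tool actually used for the conversion is not recorded):
+
+```python
+import soundfile as sf  # assumption: pip install soundfile
+
+# Read a LibriSpeech flac clip and re-encode it as wav,
+# keeping the original sample rate and channel layout.
+data, sample_rate = sf.read("251-137823-0002.flac")
+sf.write("testingroutine.wav", data, sample_rate)
+```
+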
+## License
+
+[Creative Commons Attribution 4.0 International Public License](../../LICENSE_CC_4.0.txt).
diff --git a/resources/asr/samples/itellyou.wav b/resources/asr/samples/itellyou.wav
new file mode 100644
index 0000000..001ce80
--- /dev/null
+++ b/resources/asr/samples/itellyou.wav
Binary files differ
diff --git a/resources/asr/samples/testingroutine.wav b/resources/asr/samples/testingroutine.wav
new file mode 100644
index 0000000..0d8da6b
--- /dev/null
+++ b/resources/asr/samples/testingroutine.wav
Binary files differ
diff --git a/resources/img_class/labels/labels_mobilenet_v2_1.0_224.txt b/resources/img_class/labels/labels_mobilenet_v2_1.0_224.txt
new file mode 100644
index 0000000..0ce2451
--- /dev/null
+++ b/resources/img_class/labels/labels_mobilenet_v2_1.0_224.txt
@@ -0,0 +1,1001 @@
+background
+tench, Tinca tinca
+goldfish, Carassius auratus
+great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias
+tiger shark, Galeocerdo cuvieri
+hammerhead, hammerhead shark
+electric ray, crampfish, numbfish, torpedo
+stingray
+cock
+hen
+ostrich, Struthio camelus
+brambling, Fringilla montifringilla
+goldfinch, Carduelis carduelis
+house finch, linnet, Carpodacus mexicanus
+junco, snowbird
+indigo bunting, indigo finch, indigo bird, Passerina cyanea
+robin, American robin, Turdus migratorius
+bulbul
+jay
+magpie
+chickadee
+water ouzel, dipper
+kite
+bald eagle, American eagle, Haliaeetus leucocephalus
+vulture
+great grey owl, great gray owl, Strix nebulosa
+European fire salamander, Salamandra salamandra
+common newt, Triturus vulgaris
+eft
+spotted salamander, Ambystoma maculatum
+axolotl, mud puppy, Ambystoma mexicanum
+bullfrog, Rana catesbeiana
+tree frog, tree-frog
+tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui
+loggerhead, loggerhead turtle, Caretta caretta
+leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea
+mud turtle
+terrapin
+box turtle, box tortoise
+banded gecko
+common iguana, iguana, Iguana iguana
+American chameleon, anole, Anolis carolinensis
+whiptail, whiptail lizard
+agama
+frilled lizard, Chlamydosaurus kingi
+alligator lizard
+Gila monster, Heloderma suspectum
+green lizard, Lacerta viridis
+African chameleon, Chamaeleo chamaeleon
+Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis
+African crocodile, Nile crocodile, Crocodylus niloticus
+American alligator, Alligator mississipiensis
+triceratops
+thunder snake, worm snake, Carphophis amoenus
+ringneck snake, ring-necked snake, ring snake
+hognose snake, puff adder, sand viper
+green snake, grass snake
+king snake, kingsnake
+garter snake, grass snake
+water snake
+vine snake
+night snake, Hypsiglena torquata
+boa constrictor, Constrictor constrictor
+rock python, rock snake, Python sebae
+Indian cobra, Naja naja
+green mamba
+sea snake
+horned viper, cerastes, sand viper, horned asp, Cerastes cornutus
+diamondback, diamondback rattlesnake, Crotalus adamanteus
+sidewinder, horned rattlesnake, Crotalus cerastes
+trilobite
+harvestman, daddy longlegs, Phalangium opilio
+scorpion
+black and gold garden spider, Argiope aurantia
+barn spider, Araneus cavaticus
+garden spider, Aranea diademata
+black widow, Latrodectus mactans
+tarantula
+wolf spider, hunting spider
+tick
+centipede
+black grouse
+ptarmigan
+ruffed grouse, partridge, Bonasa umbellus
+prairie chicken, prairie grouse, prairie fowl
+peacock
+quail
+partridge
+African grey, African gray, Psittacus erithacus
+macaw
+sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita
+lorikeet
+coucal
+bee eater
+hornbill
+hummingbird
+jacamar
+toucan
+drake
+red-breasted merganser, Mergus serrator
+goose
+black swan, Cygnus atratus
+tusker
+echidna, spiny anteater, anteater
+platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus
+wallaby, brush kangaroo
+koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus
+wombat
+jellyfish
+sea anemone, anemone
+brain coral
+flatworm, platyhelminth
+nematode, nematode worm, roundworm
+conch
+snail
+slug
+sea slug, nudibranch
+chiton, coat-of-mail shell, sea cradle, polyplacophore
+chambered nautilus, pearly nautilus, nautilus
+Dungeness crab, Cancer magister
+rock crab, Cancer irroratus
+fiddler crab
+king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica
+American lobster, Northern lobster, Maine lobster, Homarus americanus
+spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish
+crayfish, crawfish, crawdad, crawdaddy
+hermit crab
+isopod
+white stork, Ciconia ciconia
+black stork, Ciconia nigra
+spoonbill
+flamingo
+little blue heron, Egretta caerulea
+American egret, great white heron, Egretta albus
+bittern
+crane
+limpkin, Aramus pictus
+European gallinule, Porphyrio porphyrio
+American coot, marsh hen, mud hen, water hen, Fulica americana
+bustard
+ruddy turnstone, Arenaria interpres
+red-backed sandpiper, dunlin, Erolia alpina
+redshank, Tringa totanus
+dowitcher
+oystercatcher, oyster catcher
+pelican
+king penguin, Aptenodytes patagonica
+albatross, mollymawk
+grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus
+killer whale, killer, orca, grampus, sea wolf, Orcinus orca
+dugong, Dugong dugon
+sea lion
+Chihuahua
+Japanese spaniel
+Maltese dog, Maltese terrier, Maltese
+Pekinese, Pekingese, Peke
+Shih-Tzu
+Blenheim spaniel
+papillon
+toy terrier
+Rhodesian ridgeback
+Afghan hound, Afghan
+basset, basset hound
+beagle
+bloodhound, sleuthhound
+bluetick
+black-and-tan coonhound
+Walker hound, Walker foxhound
+English foxhound
+redbone
+borzoi, Russian wolfhound
+Irish wolfhound
+Italian greyhound
+whippet
+Ibizan hound, Ibizan Podenco
+Norwegian elkhound, elkhound
+otterhound, otter hound
+Saluki, gazelle hound
+Scottish deerhound, deerhound
+Weimaraner
+Staffordshire bullterrier, Staffordshire bull terrier
+American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier
+Bedlington terrier
+Border terrier
+Kerry blue terrier
+Irish terrier
+Norfolk terrier
+Norwich terrier
+Yorkshire terrier
+wire-haired fox terrier
+Lakeland terrier
+Sealyham terrier, Sealyham
+Airedale, Airedale terrier
+cairn, cairn terrier
+Australian terrier
+Dandie Dinmont, Dandie Dinmont terrier
+Boston bull, Boston terrier
+miniature schnauzer
+giant schnauzer
+standard schnauzer
+Scotch terrier, Scottish terrier, Scottie
+Tibetan terrier, chrysanthemum dog
+silky terrier, Sydney silky
+soft-coated wheaten terrier
+West Highland white terrier
+Lhasa, Lhasa apso
+flat-coated retriever
+curly-coated retriever
+golden retriever
+Labrador retriever
+Chesapeake Bay retriever
+German short-haired pointer
+vizsla, Hungarian pointer
+English setter
+Irish setter, red setter
+Gordon setter
+Brittany spaniel
+clumber, clumber spaniel
+English springer, English springer spaniel
+Welsh springer spaniel
+cocker spaniel, English cocker spaniel, cocker
+Sussex spaniel
+Irish water spaniel
+kuvasz
+schipperke
+groenendael
+malinois
+briard
+kelpie
+komondor
+Old English sheepdog, bobtail
+Shetland sheepdog, Shetland sheep dog, Shetland
+collie
+Border collie
+Bouvier des Flandres, Bouviers des Flandres
+Rottweiler
+German shepherd, German shepherd dog, German police dog, alsatian
+Doberman, Doberman pinscher
+miniature pinscher
+Greater Swiss Mountain dog
+Bernese mountain dog
+Appenzeller
+EntleBucher
+boxer
+bull mastiff
+Tibetan mastiff
+French bulldog
+Great Dane
+Saint Bernard, St Bernard
+Eskimo dog, husky
+malamute, malemute, Alaskan malamute
+Siberian husky
+dalmatian, coach dog, carriage dog
+affenpinscher, monkey pinscher, monkey dog
+basenji
+pug, pug-dog
+Leonberg
+Newfoundland, Newfoundland dog
+Great Pyrenees
+Samoyed, Samoyede
+Pomeranian
+chow, chow chow
+keeshond
+Brabancon griffon
+Pembroke, Pembroke Welsh corgi
+Cardigan, Cardigan Welsh corgi
+toy poodle
+miniature poodle
+standard poodle
+Mexican hairless
+timber wolf, grey wolf, gray wolf, Canis lupus
+white wolf, Arctic wolf, Canis lupus tundrarum
+red wolf, maned wolf, Canis rufus, Canis niger
+coyote, prairie wolf, brush wolf, Canis latrans
+dingo, warrigal, warragal, Canis dingo
+dhole, Cuon alpinus
+African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus
+hyena, hyaena
+red fox, Vulpes vulpes
+kit fox, Vulpes macrotis
+Arctic fox, white fox, Alopex lagopus
+grey fox, gray fox, Urocyon cinereoargenteus
+tabby, tabby cat
+tiger cat
+Persian cat
+Siamese cat, Siamese
+Egyptian cat
+cougar, puma, catamount, mountain lion, painter, panther, Felis concolor
+lynx, catamount
+leopard, Panthera pardus
+snow leopard, ounce, Panthera uncia
+jaguar, panther, Panthera onca, Felis onca
+lion, king of beasts, Panthera leo
+tiger, Panthera tigris
+cheetah, chetah, Acinonyx jubatus
+brown bear, bruin, Ursus arctos
+American black bear, black bear, Ursus americanus, Euarctos americanus
+ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus
+sloth bear, Melursus ursinus, Ursus ursinus
+mongoose
+meerkat, mierkat
+tiger beetle
+ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle
+ground beetle, carabid beetle
+long-horned beetle, longicorn, longicorn beetle
+leaf beetle, chrysomelid
+dung beetle
+rhinoceros beetle
+weevil
+fly
+bee
+ant, emmet, pismire
+grasshopper, hopper
+cricket
+walking stick, walkingstick, stick insect
+cockroach, roach
+mantis, mantid
+cicada, cicala
+leafhopper
+lacewing, lacewing fly
+dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk
+damselfly
+admiral
+ringlet, ringlet butterfly
+monarch, monarch butterfly, milkweed butterfly, Danaus plexippus
+cabbage butterfly
+sulphur butterfly, sulfur butterfly
+lycaenid, lycaenid butterfly
+starfish, sea star
+sea urchin
+sea cucumber, holothurian
+wood rabbit, cottontail, cottontail rabbit
+hare
+Angora, Angora rabbit
+hamster
+porcupine, hedgehog
+fox squirrel, eastern fox squirrel, Sciurus niger
+marmot
+beaver
+guinea pig, Cavia cobaya
+sorrel
+zebra
+hog, pig, grunter, squealer, Sus scrofa
+wild boar, boar, Sus scrofa
+warthog
+hippopotamus, hippo, river horse, Hippopotamus amphibius
+ox
+water buffalo, water ox, Asiatic buffalo, Bubalus bubalis
+bison
+ram, tup
+bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis
+ibex, Capra ibex
+hartebeest
+impala, Aepyceros melampus
+gazelle
+Arabian camel, dromedary, Camelus dromedarius
+llama
+weasel
+mink
+polecat, fitch, foulmart, foumart, Mustela putorius
+black-footed ferret, ferret, Mustela nigripes
+otter
+skunk, polecat, wood pussy
+badger
+armadillo
+three-toed sloth, ai, Bradypus tridactylus
+orangutan, orang, orangutang, Pongo pygmaeus
+gorilla, Gorilla gorilla
+chimpanzee, chimp, Pan troglodytes
+gibbon, Hylobates lar
+siamang, Hylobates syndactylus, Symphalangus syndactylus
+guenon, guenon monkey
+patas, hussar monkey, Erythrocebus patas
+baboon
+macaque
+langur
+colobus, colobus monkey
+proboscis monkey, Nasalis larvatus
+marmoset
+capuchin, ringtail, Cebus capucinus
+howler monkey, howler
+titi, titi monkey
+spider monkey, Ateles geoffroyi
+squirrel monkey, Saimiri sciureus
+Madagascar cat, ring-tailed lemur, Lemur catta
+indri, indris, Indri indri, Indri brevicaudatus
+Indian elephant, Elephas maximus
+African elephant, Loxodonta africana
+lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens
+giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca
+barracouta, snoek
+eel
+coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch
+rock beauty, Holocanthus tricolor
+anemone fish
+sturgeon
+gar, garfish, garpike, billfish, Lepisosteus osseus
+lionfish
+puffer, pufferfish, blowfish, globefish
+abacus
+abaya
+academic gown, academic robe, judge's robe
+accordion, piano accordion, squeeze box
+acoustic guitar
+aircraft carrier, carrier, flattop, attack aircraft carrier
+airliner
+airship, dirigible
+altar
+ambulance
+amphibian, amphibious vehicle
+analog clock
+apiary, bee house
+apron
+ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin
+assault rifle, assault gun
+backpack, back pack, knapsack, packsack, rucksack, haversack
+bakery, bakeshop, bakehouse
+balance beam, beam
+balloon
+ballpoint, ballpoint pen, ballpen, Biro
+Band Aid
+banjo
+bannister, banister, balustrade, balusters, handrail
+barbell
+barber chair
+barbershop
+barn
+barometer
+barrel, cask
+barrow, garden cart, lawn cart, wheelbarrow
+baseball
+basketball
+bassinet
+bassoon
+bathing cap, swimming cap
+bath towel
+bathtub, bathing tub, bath, tub
+beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
+beacon, lighthouse, beacon light, pharos
+beaker
+bearskin, busby, shako
+beer bottle
+beer glass
+bell cote, bell cot
+bib
+bicycle-built-for-two, tandem bicycle, tandem
+bikini, two-piece
+binder, ring-binder
+binoculars, field glasses, opera glasses
+birdhouse
+boathouse
+bobsled, bobsleigh, bob
+bolo tie, bolo, bola tie, bola
+bonnet, poke bonnet
+bookcase
+bookshop, bookstore, bookstall
+bottlecap
+bow
+bow tie, bow-tie, bowtie
+brass, memorial tablet, plaque
+brassiere, bra, bandeau
+breakwater, groin, groyne, mole, bulwark, seawall, jetty
+breastplate, aegis, egis
+broom
+bucket, pail
+buckle
+bulletproof vest
+bullet train, bullet
+butcher shop, meat market
+cab, hack, taxi, taxicab
+caldron, cauldron
+candle, taper, wax light
+cannon
+canoe
+can opener, tin opener
+cardigan
+car mirror
+carousel, carrousel, merry-go-round, roundabout, whirligig
+carpenter's kit, tool kit
+carton
+car wheel
+cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM
+cassette
+cassette player
+castle
+catamaran
+CD player
+cello, violoncello
+cellular telephone, cellular phone, cellphone, cell, mobile phone
+chain
+chainlink fence
+chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour
+chain saw, chainsaw
+chest
+chiffonier, commode
+chime, bell, gong
+china cabinet, china closet
+Christmas stocking
+church, church building
+cinema, movie theater, movie theatre, movie house, picture palace
+cleaver, meat cleaver, chopper
+cliff dwelling
+cloak
+clog, geta, patten, sabot
+cocktail shaker
+coffee mug
+coffeepot
+coil, spiral, volute, whorl, helix
+combination lock
+computer keyboard, keypad
+confectionery, confectionary, candy store
+container ship, containership, container vessel
+convertible
+corkscrew, bottle screw
+cornet, horn, trumpet, trump
+cowboy boot
+cowboy hat, ten-gallon hat
+cradle
+crane
+crash helmet
+crate
+crib, cot
+Crock Pot
+croquet ball
+crutch
+cuirass
+dam, dike, dyke
+desk
+desktop computer
+dial telephone, dial phone
+diaper, nappy, napkin
+digital clock
+digital watch
+dining table, board
+dishrag, dishcloth
+dishwasher, dish washer, dishwashing machine
+disk brake, disc brake
+dock, dockage, docking facility
+dogsled, dog sled, dog sleigh
+dome
+doormat, welcome mat
+drilling platform, offshore rig
+drum, membranophone, tympan
+drumstick
+dumbbell
+Dutch oven
+electric fan, blower
+electric guitar
+electric locomotive
+entertainment center
+envelope
+espresso maker
+face powder
+feather boa, boa
+file, file cabinet, filing cabinet
+fireboat
+fire engine, fire truck
+fire screen, fireguard
+flagpole, flagstaff
+flute, transverse flute
+folding chair
+football helmet
+forklift
+fountain
+fountain pen
+four-poster
+freight car
+French horn, horn
+frying pan, frypan, skillet
+fur coat
+garbage truck, dustcart
+gasmask, respirator, gas helmet
+gas pump, gasoline pump, petrol pump, island dispenser
+goblet
+go-kart
+golf ball
+golfcart, golf cart
+gondola
+gong, tam-tam
+gown
+grand piano, grand
+greenhouse, nursery, glasshouse
+grille, radiator grille
+grocery store, grocery, food market, market
+guillotine
+hair slide
+hair spray
+half track
+hammer
+hamper
+hand blower, blow dryer, blow drier, hair dryer, hair drier
+hand-held computer, hand-held microcomputer
+handkerchief, hankie, hanky, hankey
+hard disc, hard disk, fixed disk
+harmonica, mouth organ, harp, mouth harp
+harp
+harvester, reaper
+hatchet
+holster
+home theater, home theatre
+honeycomb
+hook, claw
+hoopskirt, crinoline
+horizontal bar, high bar
+horse cart, horse-cart
+hourglass
+iPod
+iron, smoothing iron
+jack-o'-lantern
+jean, blue jean, denim
+jeep, landrover
+jersey, T-shirt, tee shirt
+jigsaw puzzle
+jinrikisha, ricksha, rickshaw
+joystick
+kimono
+knee pad
+knot
+lab coat, laboratory coat
+ladle
+lampshade, lamp shade
+laptop, laptop computer
+lawn mower, mower
+lens cap, lens cover
+letter opener, paper knife, paperknife
+library
+lifeboat
+lighter, light, igniter, ignitor
+limousine, limo
+liner, ocean liner
+lipstick, lip rouge
+Loafer
+lotion
+loudspeaker, speaker, speaker unit, loudspeaker system, speaker system
+loupe, jeweler's loupe
+lumbermill, sawmill
+magnetic compass
+mailbag, postbag
+mailbox, letter box
+maillot
+maillot, tank suit
+manhole cover
+maraca
+marimba, xylophone
+mask
+matchstick
+maypole
+maze, labyrinth
+measuring cup
+medicine chest, medicine cabinet
+megalith, megalithic structure
+microphone, mike
+microwave, microwave oven
+military uniform
+milk can
+minibus
+miniskirt, mini
+minivan
+missile
+mitten
+mixing bowl
+mobile home, manufactured home
+Model T
+modem
+monastery
+monitor
+moped
+mortar
+mortarboard
+mosque
+mosquito net
+motor scooter, scooter
+mountain bike, all-terrain bike, off-roader
+mountain tent
+mouse, computer mouse
+mousetrap
+moving van
+muzzle
+nail
+neck brace
+necklace
+nipple
+notebook, notebook computer
+obelisk
+oboe, hautboy, hautbois
+ocarina, sweet potato
+odometer, hodometer, mileometer, milometer
+oil filter
+organ, pipe organ
+oscilloscope, scope, cathode-ray oscilloscope, CRO
+overskirt
+oxcart
+oxygen mask
+packet
+paddle, boat paddle
+paddlewheel, paddle wheel
+padlock
+paintbrush
+pajama, pyjama, pj's, jammies
+palace
+panpipe, pandean pipe, syrinx
+paper towel
+parachute, chute
+parallel bars, bars
+park bench
+parking meter
+passenger car, coach, carriage
+patio, terrace
+pay-phone, pay-station
+pedestal, plinth, footstall
+pencil box, pencil case
+pencil sharpener
+perfume, essence
+Petri dish
+photocopier
+pick, plectrum, plectron
+pickelhaube
+picket fence, paling
+pickup, pickup truck
+pier
+piggy bank, penny bank
+pill bottle
+pillow
+ping-pong ball
+pinwheel
+pirate, pirate ship
+pitcher, ewer
+plane, carpenter's plane, woodworking plane
+planetarium
+plastic bag
+plate rack
+plow, plough
+plunger, plumber's helper
+Polaroid camera, Polaroid Land camera
+pole
+police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria
+poncho
+pool table, billiard table, snooker table
+pop bottle, soda bottle
+pot, flowerpot
+potter's wheel
+power drill
+prayer rug, prayer mat
+printer
+prison, prison house
+projectile, missile
+projector
+puck, hockey puck
+punching bag, punch bag, punching ball, punchball
+purse
+quill, quill pen
+quilt, comforter, comfort, puff
+racer, race car, racing car
+racket, racquet
+radiator
+radio, wireless
+radio telescope, radio reflector
+rain barrel
+recreational vehicle, RV, R.V.
+reel
+reflex camera
+refrigerator, icebox
+remote control, remote
+restaurant, eating house, eating place, eatery
+revolver, six-gun, six-shooter
+rifle
+rocking chair, rocker
+rotisserie
+rubber eraser, rubber, pencil eraser
+rugby ball
+rule, ruler
+running shoe
+safe
+safety pin
+saltshaker, salt shaker
+sandal
+sarong
+sax, saxophone
+scabbard
+scale, weighing machine
+school bus
+schooner
+scoreboard
+screen, CRT screen
+screw
+screwdriver
+seat belt, seatbelt
+sewing machine
+shield, buckler
+shoe shop, shoe-shop, shoe store
+shoji
+shopping basket
+shopping cart
+shovel
+shower cap
+shower curtain
+ski
+ski mask
+sleeping bag
+slide rule, slipstick
+sliding door
+slot, one-armed bandit
+snorkel
+snowmobile
+snowplow, snowplough
+soap dispenser
+soccer ball
+sock
+solar dish, solar collector, solar furnace
+sombrero
+soup bowl
+space bar
+space heater
+space shuttle
+spatula
+speedboat
+spider web, spider's web
+spindle
+sports car, sport car
+spotlight, spot
+stage
+steam locomotive
+steel arch bridge
+steel drum
+stethoscope
+stole
+stone wall
+stopwatch, stop watch
+stove
+strainer
+streetcar, tram, tramcar, trolley, trolley car
+stretcher
+studio couch, day bed
+stupa, tope
+submarine, pigboat, sub, U-boat
+suit, suit of clothes
+sundial
+sunglass
+sunglasses, dark glasses, shades
+sunscreen, sunblock, sun blocker
+suspension bridge
+swab, swob, mop
+sweatshirt
+swimming trunks, bathing trunks
+swing
+switch, electric switch, electrical switch
+syringe
+table lamp
+tank, army tank, armored combat vehicle, armoured combat vehicle
+tape player
+teapot
+teddy, teddy bear
+television, television system
+tennis ball
+thatch, thatched roof
+theater curtain, theatre curtain
+thimble
+thresher, thrasher, threshing machine
+throne
+tile roof
+toaster
+tobacco shop, tobacconist shop, tobacconist
+toilet seat
+torch
+totem pole
+tow truck, tow car, wrecker
+toyshop
+tractor
+trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi
+tray
+trench coat
+tricycle, trike, velocipede
+trimaran
+tripod
+triumphal arch
+trolleybus, trolley coach, trackless trolley
+trombone
+tub, vat
+turnstile
+typewriter keyboard
+umbrella
+unicycle, monocycle
+upright, upright piano
+vacuum, vacuum cleaner
+vase
+vault
+velvet
+vending machine
+vestment
+viaduct
+violin, fiddle
+volleyball
+waffle iron
+wall clock
+wallet, billfold, notecase, pocketbook
+wardrobe, closet, press
+warplane, military plane
+washbasin, handbasin, washbowl, lavabo, wash-hand basin
+washer, automatic washer, washing machine
+water bottle
+water jug
+water tower
+whiskey jug
+whistle
+wig
+window screen
+window shade
+Windsor tie
+wine bottle
+wing
+wok
+wooden spoon
+wool, woolen, woollen
+worm fence, snake fence, snake-rail fence, Virginia fence
+wreck
+yawl
+yurt
+web site, website, internet site, site
+comic book
+crossword puzzle, crossword
+street sign
+traffic light, traffic signal, stoplight
+book jacket, dust cover, dust jacket, dust wrapper
+menu
+plate
+guacamole
+consomme
+hot pot, hotpot
+trifle
+ice cream, icecream
+ice lolly, lolly, lollipop, popsicle
+French loaf
+bagel, beigel
+pretzel
+cheeseburger
+hotdog, hot dog, red hot
+mashed potato
+head cabbage
+broccoli
+cauliflower
+zucchini, courgette
+spaghetti squash
+acorn squash
+butternut squash
+cucumber, cuke
+artichoke, globe artichoke
+bell pepper
+cardoon
+mushroom
+Granny Smith
+strawberry
+orange
+lemon
+fig
+pineapple, ananas
+banana
+jackfruit, jak, jack
+custard apple
+pomegranate
+hay
+carbonara
+chocolate sauce, chocolate syrup
+dough
+meat loaf, meatloaf
+pizza, pizza pie
+potpie
+burrito
+red wine
+espresso
+cup
+eggnog
+alp
+bubble
+cliff, drop, drop-off
+coral reef
+geyser
+lakeside, lakeshore
+promontory, headland, head, foreland
+sandbar, sand bar
+seashore, coast, seacoast, sea-coast
+valley, vale
+volcano
+ballplayer, baseball player
+groom, bridegroom
+scuba diver
+rapeseed
+daisy
+yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum
+corn
+acorn
+hip, rose hip, rosehip
+buckeye, horse chestnut, conker
+coral fungus
+agaric
+gyromitra
+stinkhorn, carrion fungus
+earthstar
+hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa
+bolete
+ear, spike, capitulum
+toilet tissue, toilet paper, bathroom tissue
\ No newline at end of file
diff --git a/resources/img_class/samples/cat.bmp b/resources/img_class/samples/cat.bmp
new file mode 100644
index 0000000..b1f3c69
--- /dev/null
+++ b/resources/img_class/samples/cat.bmp
Binary files differ
diff --git a/resources/img_class/samples/dog.bmp b/resources/img_class/samples/dog.bmp
new file mode 100644
index 0000000..180ba3c
--- /dev/null
+++ b/resources/img_class/samples/dog.bmp
Binary files differ
diff --git a/resources/img_class/samples/files.md b/resources/img_class/samples/files.md
new file mode 100644
index 0000000..c031ecd
--- /dev/null
+++ b/resources/img_class/samples/files.md
@@ -0,0 +1,12 @@
+# Sample images
+
+The sample images provided are under a Creative Commons License. The links are documented here for traceability:
+
+- [kimono.bmp](https://www.pexels.com/photo/three-geisha-walking-between-buildings-1325837/)
+- [tiger.bmp](https://www.pexels.com/photo/tiger-in-green-grass-near-the-tree-during-daytime-162173/)
+- [cat.bmp](https://www.pexels.com/photo/cat-whiskers-kitty-tabby-20787/)
+- [dog.bmp](https://www.pexels.com/photo/adult-black-pug-1851164/)
+
+## License
+
+[Creative Commons Attribution 1.0 Generic](../../LICENSE_CC_1.0.txt).
diff --git a/resources/img_class/samples/kimono.bmp b/resources/img_class/samples/kimono.bmp
new file mode 100644
index 0000000..c1274a5
--- /dev/null
+++ b/resources/img_class/samples/kimono.bmp
Binary files differ
diff --git a/resources/img_class/samples/tiger.bmp b/resources/img_class/samples/tiger.bmp
new file mode 100644
index 0000000..b53b0c0
--- /dev/null
+++ b/resources/img_class/samples/tiger.bmp
Binary files differ
diff --git a/resources/kws/labels/ds_cnn_labels.txt b/resources/kws/labels/ds_cnn_labels.txt
new file mode 100644
index 0000000..ba41645
--- /dev/null
+++ b/resources/kws/labels/ds_cnn_labels.txt
@@ -0,0 +1,12 @@
+_silence_
+_unknown_
+yes
+no
+up
+down
+left
+right
+on
+off
+stop
+go
\ No newline at end of file
diff --git a/resources/kws/samples/down.wav b/resources/kws/samples/down.wav
new file mode 100644
index 0000000..7c77f63
--- /dev/null
+++ b/resources/kws/samples/down.wav
Binary files differ
diff --git a/resources/kws/samples/files.md b/resources/kws/samples/files.md
new file mode 100644
index 0000000..29d42ae
--- /dev/null
+++ b/resources/kws/samples/files.md
@@ -0,0 +1,50 @@
+# Sample wav audio clip
+
+The sample wav audio clips provided are under the Creative Commons Attribution 4.0 International Public License.
+The source is http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz; in particular, the files used are listed here for traceability:
+
+- down.wav
+
+    ```tree
+    speech_commands_v0.02
+    └── down
+       └── 0a9f9af7_nohash_2.wav
+    ```
+
+- rightleftup.wav
+
+    ```tree
+    speech_commands_v0.02
+    ├── left
+    │   └── 0d82fd99_nohash_3.wav
+    ├── right
+    │   └── 0d82fd99_nohash_0.wav
+    └── up
+       └── 0a2b400e_nohash_1.wav
+    ```
+
+- yes.wav
+
+    ```tree
+    speech_commands_v0.02
+    └── yes
+       └── 0b40aa8e_nohash_0.wav
+    ```
+
+- yesnogostop.wav
+
+    ```tree
+    speech_commands_v0.02
+    ├── go
+    │   └── 0c2ca723_nohash_2.wav
+    ├── no
+    │   └── 0a2b400e_nohash_0.wav
+    ├── stop
+    │   └── 0a196374_nohash_0.wav
+    └── yes
+       └── 0b40aa8e_nohash_0.wav
+    ```
+
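+Clips such as `yesnogostop.wav` are several single-word recordings joined together. A minimal Python sketch of that kind of concatenation (illustrative only; the exact tool used is not recorded), using the standard `wave` module:
+
+```python
+import wave
+
+def concat_wavs(inputs, output):
+    """Join wav clips that share the same format and sample rate."""
+    params, frames = None, []
+    for path in inputs:
+        with wave.open(path, "rb") as clip:
+            if params is None:
+                params = clip.getparams()
+            frames.append(clip.readframes(clip.getnframes()))
+    with wave.open(output, "wb") as out:
+        out.setparams(params)
+        for chunk in frames:
+            out.writeframes(chunk)
+
+concat_wavs(["yes/0b40aa8e_nohash_0.wav", "no/0a2b400e_nohash_0.wav",
+             "go/0c2ca723_nohash_2.wav", "stop/0a196374_nohash_0.wav"],
+            "yesnogostop.wav")
+```
+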
+## License
+
+[Creative Commons Attribution 4.0 International Public License](../../LICENSE_CC_4.0.txt).
diff --git a/resources/kws/samples/rightleftup.wav b/resources/kws/samples/rightleftup.wav
new file mode 100644
index 0000000..47551e8
--- /dev/null
+++ b/resources/kws/samples/rightleftup.wav
Binary files differ
diff --git a/resources/kws/samples/yes.wav b/resources/kws/samples/yes.wav
new file mode 100644
index 0000000..f3489db
--- /dev/null
+++ b/resources/kws/samples/yes.wav
Binary files differ
diff --git a/resources/kws/samples/yesnogostop.wav b/resources/kws/samples/yesnogostop.wav
new file mode 100644
index 0000000..2a2c0ac
--- /dev/null
+++ b/resources/kws/samples/yesnogostop.wav
Binary files differ
diff --git a/resources/kws_asr/labels/ds_cnn_labels.txt b/resources/kws_asr/labels/ds_cnn_labels.txt
new file mode 100644
index 0000000..ba41645
--- /dev/null
+++ b/resources/kws_asr/labels/ds_cnn_labels.txt
@@ -0,0 +1,12 @@
+_silence_
+_unknown_
+yes
+no
+up
+down
+left
+right
+on
+off
+stop
+go
\ No newline at end of file
diff --git a/resources/kws_asr/labels/labels_wav2letter.txt b/resources/kws_asr/labels/labels_wav2letter.txt
new file mode 100644
index 0000000..8fb2fc8
--- /dev/null
+++ b/resources/kws_asr/labels/labels_wav2letter.txt
@@ -0,0 +1,29 @@
+a
+b
+c
+d
+e
+f
+g
+h
+i
+j
+k
+l
+m
+n
+o
+p
+q
+r
+s
+t
+u
+v
+w
+x
+y
+z
+'
+ 
+$
\ No newline at end of file
diff --git a/resources/kws_asr/samples/files.md b/resources/kws_asr/samples/files.md
new file mode 100644
index 0000000..6db8d65
--- /dev/null
+++ b/resources/kws_asr/samples/files.md
@@ -0,0 +1,22 @@
+# Sample wav audio clip
+
+The sample wav audio clips provided are under the Creative Commons Attribution 4.0 International Public License.
+The source is http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz; in particular, the files used are listed here for traceability:
+
+- yesnogostop.wav
+
+    ```tree
+    speech_commands_v0.02
+    ├── go
+    │   └── 0c2ca723_nohash_2.wav
+    ├── no
+    │   └── 0a2b400e_nohash_0.wav
+    ├── stop
+    │   └── 0a196374_nohash_0.wav
+    └── yes
+       └── 0b40aa8e_nohash_0.wav
+    ```
+
+## License
+
+[Creative Commons Attribution 4.0 International Public License](../../LICENSE_CC_4.0.txt).
diff --git a/resources/kws_asr/samples/yesnogostop.wav b/resources/kws_asr/samples/yesnogostop.wav
new file mode 100644
index 0000000..2a2c0ac
--- /dev/null
+++ b/resources/kws_asr/samples/yesnogostop.wav
Binary files differ
diff --git a/scripts/cmake/bare-metal-sources.cmake b/scripts/cmake/bare-metal-sources.cmake
new file mode 100644
index 0000000..3e24d7b
--- /dev/null
+++ b/scripts/cmake/bare-metal-sources.cmake
@@ -0,0 +1,170 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build_baremetal)
+set(PLAT_HAL ${CMAKE_CURRENT_SOURCE_DIR}/source/application/hal/platforms/bare-metal)
+
+# If the target platform is not defined, raise an error.
+# TARGET_PLATFORM should either have been defined by the user or set to the default value, mps3.
+if (NOT DEFINED TARGET_PLATFORM)
+    message(FATAL_ERROR "Invalid target platform, specify TARGET_PLATFORM=mps3")
+endif ()
+message(STATUS "target platform ${TARGET_PLATFORM}")
+
+set(SOURCE_GEN_DIR          ${CMAKE_BINARY_DIR}/generated/bsp)
+if (NOT DEFINED MEM_PROFILES_SRC_DIR)
+    set(MEM_PROFILES_SRC_DIR    ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/subsystem-profiles)
+endif()
+set(MEM_PROFILE_TEMPLATE    ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/templates/peripheral_memmap.h.template)
+set(IRQ_PROFILE_TEMPLATE    ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/templates/peripheral_irqs.h.template)
+set(MEM_REGIONS_TEMPLATE    ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/templates/mem_regions.h.template)
+set(TA_SETTINGS_TEMPLATE    ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/templates/timing_adapter_settings.template)
+set(TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME  "libtensorflow-microlite.a")
+set(TENSORFLOW_LITE_MICRO_FLAG               "-DTF_LITE_STATIC_MEMORY")
+set(ETHOS_U55_FLAG          "-DARM_NPU=1")
+
+if (ETHOS_U55_ENABLED)
+    set(OPTIONAL_FLAGS      "${OPTIONAL_FLAGS} ${ETHOS_U55_FLAG}")
+endif ()
+
+# Set specific flags depending on target platform and subsystem
+if (TARGET_PLATFORM STREQUAL mps3)
+    set(MPS3_PLATFORM_FLAG          "-DMPS3_PLATFORM=1")
+
+    # If the target platform is mps3 and the subsystem is not defined, raise an error.
+    # TARGET_SUBSYSTEM should either have been defined by the user or set to a default value.
+    if (NOT DEFINED TARGET_SUBSYSTEM)
+        message(FATAL_ERROR "Target subsystem for mps3 undefined, "
+                            "specify -DTARGET_SUBSYSTEM=<sse-200 or sse-300>")
+    endif ()
+
+    if (TARGET_SUBSYSTEM STREQUAL sse-200 OR TARGET_SUBSYSTEM STREQUAL sse-300)
+        message(STATUS          "target subsystem is ${TARGET_SUBSYSTEM}")
+        set(BSP_PACKAGE_DIR     "${PLAT_HAL}/bsp/bsp-packs/mps3")
+        set(SCAT_FILE           "${PLAT_HAL}/bsp/mem_layout/mps3-${TARGET_SUBSYSTEM}.sct")
+
+        # Include the mem profile definitions specific to our target subsystem
+        include(${MEM_PROFILES_SRC_DIR}/corstone-${TARGET_SUBSYSTEM}.cmake)
+        set(OPTIONAL_FLAGS      "${OPTIONAL_FLAGS} ${MPS3_PLATFORM_FLAG}")
+    else ()
+        message(FATAL_ERROR "Incompatible target subsystem: ${TARGET_SUBSYSTEM}")
+    endif ()
+elseif (TARGET_PLATFORM STREQUAL simple_platform)
+    set(BSP_PACKAGE_DIR     "${PLAT_HAL}/bsp/bsp-packs/${TARGET_PLATFORM}")
+    set(SCAT_FILE           "${PLAT_HAL}/bsp/mem_layout/${TARGET_PLATFORM}.sct")
+    include(${MEM_PROFILES_SRC_DIR}/${TARGET_PLATFORM}.cmake)
+    set(OPTIONAL_FLAGS      "${OPTIONAL_FLAGS}")
+else ()
+    message(FATAL_ERROR "Incompatible target platform: ${TARGET_PLATFORM}")
+endif ()
+
+if (ETHOS_U55_ENABLED)
+    USER_OPTION(TA_CONFIG_FILE "Path to the timing adapter configuration file"
+            "${CMAKE_SCRIPTS_DIR}/ta_config.cmake"
+            FILEPATH)
+
+    # must be included after target subsystem CMake file
+    include(${TA_CONFIG_FILE})
+endif()
+
+# Generate the memory map, IRQ, memory region and timing adapter headers from
+# the memory profile CMake file included in one of the previous sections.
+message(STATUS "Configuring file from ${MEM_PROFILE_TEMPLATE}"
+                                   ", ${IRQ_PROFILE_TEMPLATE}"
+                                " and ${MEM_REGIONS_TEMPLATE}")
+
+configure_file("${MEM_PROFILE_TEMPLATE}" "${SOURCE_GEN_DIR}/peripheral_memmap.h")
+configure_file("${IRQ_PROFILE_TEMPLATE}" "${SOURCE_GEN_DIR}/peripheral_irqs.h")
+configure_file("${MEM_REGIONS_TEMPLATE}" "${SOURCE_GEN_DIR}/mem_regions.h")
+configure_file("${TA_SETTINGS_TEMPLATE}" "${SOURCE_GEN_DIR}/timing_adapter_settings.h")
+
+message(STATUS "Scatter file: ${SCAT_FILE}")
+message(STATUS "Using BSP package from: ${BSP_PACKAGE_DIR}")
+
+if (DEFINED VERIFY_TEST_OUTPUT)
+    message(STATUS "Test output verification flag is: ${VERIFY_TEST_OUTPUT}")
+    set(OPTIONAL_FLAGS "${OPTIONAL_FLAGS} -DVERIFY_TEST_OUTPUT=${VERIFY_TEST_OUTPUT}")
+endif ()
+
+if (DEFINED LOG_LEVEL)
+    message(STATUS "Setting log level to ${LOG_LEVEL}")
+    set(OPTIONAL_FLAGS "${OPTIONAL_FLAGS} -DLOG_LEVEL=${LOG_LEVEL}")
+endif()
+
+if (DEFINED ACTIVATION_BUF_SRAM_SZ)
+    message(STATUS "Maximum SRAM space for activations buffers for this system: ${ACTIVATION_BUF_SRAM_SZ}")
+    set(OPTIONAL_FLAGS "${OPTIONAL_FLAGS} -DACTIVATION_BUF_SRAM_SZ=${ACTIVATION_BUF_SRAM_SZ}")
+endif()
+
+if (DEFINED ARMCLANG_DEBUG_DWARF_LEVEL)
+    message(STATUS "setting dwarf conformance level to gdwarf-${ARMCLANG_DEBUG_DWARF_LEVEL}")
+    set(OPTIONAL_FLAGS "${OPTIONAL_FLAGS} -gdwarf-${ARMCLANG_DEBUG_DWARF_LEVEL}")
+endif()
+
+set(COMPILER_FLAGS              "${ALL_COMMON_FLAGS} ${TENSORFLOW_LITE_MICRO_FLAG} ${PROFILING_OPT} ${OPTIONAL_FLAGS}")
+# CMake does not pass the C++ standard flag for this toolchain, so add it manually.
+set(CMAKE_CXX_FLAGS             "${COMPILER_FLAGS} -std=c++11" CACHE INTERNAL "")
+set(CMAKE_C_FLAGS               "${COMPILER_FLAGS}" CACHE INTERNAL "")
+set(CMAKE_ASM_FLAGS             "${CPU_LD}")
+set(CMAKE_ASM_COMPILE_OBJECT    ${CMAKE_CXX_FLAGS})
+
+add_link_options(--strict --callgraph --load_addr_map_info --map)
+add_link_options(--symbols --xref --scatter=${SCAT_FILE})
+
+# Warnings to be ignored:
+# L6314W = No section matches pattern
+# L6439W = Multiply defined Global Symbol
+add_link_options(--diag_suppress=L6439W,L6314W)
+add_link_options(--info sizes,totals,unused,veneers --entry Reset_Handler)
+
+if (CMAKE_BUILD_TYPE STREQUAL Release)
+    add_link_options(--no_debug)
+endif ()
+
+set(CMAKE_EXE_LINKER_FLAGS "${CPU_LD}")
+
+set(PLAT_BSP_INCLUDES
+    ${PLAT_HAL}/bsp/cmsis-device/include
+    ${PLAT_HAL}/bsp/include/
+    ${PLAT_HAL}/bsp/bsp-core/include
+    ${BSP_PACKAGE_DIR}/include
+)
+
+# Include directories:
+set(PLAT_INCLUDE_DIRS
+    ${PLAT_BSP_INCLUDES}
+    ${PLAT_HAL}/utils/include
+    ${PLAT_HAL}/images/include
+    ${PLAT_HAL}/data_presentation/lcd/include
+    ${PLAT_HAL}/timer/include
+    ${SOURCE_GEN_DIR}
+    )
+
+# Source files
+file(GLOB_RECURSE SRC_PLAT_HAL
+
+    # Higher level HAL sources - software logic implementations
+    "${PLAT_HAL}/data_*/*.c"
+    "${PLAT_HAL}/images/*.c"
+    "${PLAT_HAL}/timer/*.c"
+    "${PLAT_HAL}/utils/*.c"
+
+    # Low level HAL sources - these enable interaction with
+    # the actual hardware
+    "${PLAT_HAL}/bsp/cmsis-device/*.c"
+    "${PLAT_HAL}/bsp/bsp-core/*.c"
+    "${BSP_PACKAGE_DIR}/*.c"
+    )
diff --git a/scripts/cmake/bare-metal-toolchain.cmake b/scripts/cmake/bare-metal-toolchain.cmake
new file mode 100644
index 0000000..5d91b98
--- /dev/null
+++ b/scripts/cmake/bare-metal-toolchain.cmake
@@ -0,0 +1,65 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+# specify the cross compiler
+set(CMAKE_C_COMPILER                armclang)
+set(CMAKE_CXX_COMPILER              armclang)
+set(CMAKE_C_LINKER_PREFERENCE       armlink)
+set(CMAKE_ASM_LINKER_PREFERENCE     armlink)
+set(CMAKE_ASM_COMPILER              armasm)
+set(CMAKE_ASM_COMPILER_AR           armar)
+
+set(CMAKE_CROSSCOMPILING            true)
+set(CMAKE_SYSTEM_NAME               Generic)
+
+set(MIN_ARM_CLANG_VERSION           6.14)
+
+if (NOT DEFINED CMAKE_SYSTEM_PROCESSOR)
+    set(CMAKE_SYSTEM_PROCESSOR      cortex-m55)
+endif()
+
+# Skip compiler test execution
+set(CMAKE_C_COMPILER_WORKS          1)
+set(CMAKE_CXX_COMPILER_WORKS        1)
+
+set(PLATFORM_HAL                    1)
+
+set(WARNING_OPTS                    "-Wall -Wextra -Wvla")
+set(SPECIAL_OPTS                    "-fno-rtti -funsigned-char -fno-function-sections -fno-exceptions")
+set(PLATFORM_FLAGS                  "-mthumb --target=arm-arm-non-eabi -mlittle-endian -DPLATFORM_HAL=${PLATFORM_HAL}")
+
+set(CMAKE_C_FLAGS_DEBUG             "-DDEBUG -O0")
+set(CMAKE_C_FLAGS_RELEASE           "-DNDEBUG -O3")
+
+set(CMAKE_CXX_FLAGS_DEBUG           "-DDEBUG -O0")
+set(CMAKE_CXX_FLAGS_RELEASE         "-DNDEBUG -O3")
+
+if (CMAKE_SYSTEM_PROCESSOR STREQUAL cortex-m55)
+    # Flags for cortex-m55
+    set(CPU_CORTEX_M55              1)
+    set(CPU_CC                      "-mcpu=cortex-m55 -mfloat-abi=hard -MD -DCPU_CORTEX_M55=1 -DARM_MATH_DSP -DARM_MATH_LOOPUNROLL -D__FPU_USED=1")
+    set(CPU_LD                      "--cpu=8.1-M.Main.dsp")
+elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL cortex-m33)
+    # Flags for cortex-m33 to go here
+endif()
+
+set(ALL_COMMON_FLAGS                "${CPU_CC} ${WARNING_OPTS} ${SPECIAL_OPTS} ${PLATFORM_FLAGS}")
+
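+# Guard against building with an armclang release that is too old for the
+# target CPU. Presumably invoked by the top-level CMake project once the
+# compiler version is known; the exact call site is an assumption here.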
+function(enforce_compiler_version)
+    if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${MIN_ARM_CLANG_VERSION})
+        message( FATAL_ERROR "Arm compiler version must be ${MIN_ARM_CLANG_VERSION} or greater to support ${CMAKE_SYSTEM_PROCESSOR} architecture." )
+    endif()
+endfunction()
diff --git a/scripts/cmake/cmsis-dsp.cmake b/scripts/cmake/cmsis-dsp.cmake
new file mode 100644
index 0000000..cb0243b
--- /dev/null
+++ b/scripts/cmake/cmsis-dsp.cmake
@@ -0,0 +1,74 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# CMSIS-DSP library CMake helper script.
+
+# 1. We should be cross-compiling (non-native target)
+if (TARGET_PLATFORM STREQUAL native)
+    message(FATAL_ERROR "No CMSIS-DSP support for native target.")
+endif()
+
+# 2. Check if CMSIS sources have been defined
+if (NOT DEFINED CMSIS_SRC_PATH)
+    message(FATAL_ERROR "CMSIS path should be defined for CMSIS-DSP library to be built")
+endif()
+
+# 3. Form a list of all the sources we need in the CMSIS-DSP library
+set(CMSIS_DSP_PATH_SUFFIX   "CMSIS/DSP")
+set(CMSIS_CORE_PATH_SUFFIX  "CMSIS/Core")
+set(CMSIS_DSP_SRC_DIR       "${CMSIS_SRC_PATH}/${CMSIS_DSP_PATH_SUFFIX}/Source")
+set(CMSIS_DSP_INC_DIR       "${CMSIS_SRC_PATH}/${CMSIS_DSP_PATH_SUFFIX}/Include")
+set(CMSIS_DSP_PRI_INC_DIR   "${CMSIS_SRC_PATH}/${CMSIS_DSP_PATH_SUFFIX}/PrivateInclude")
+set(CMSIS_CORE_INC_DIR      "${CMSIS_SRC_PATH}/${CMSIS_CORE_PATH_SUFFIX}/Include")
+
+file(GLOB_RECURSE
+    CMSIS_DSP_SRC
+    "${CMSIS_DSP_SRC_DIR}/arm_*.c")
+
+# 4. Create static library
+set(CMSIS_DSP_TARGET        cmsis-dsp)
+
+add_library(${CMSIS_DSP_TARGET} STATIC ${CMSIS_DSP_SRC})
+
+target_include_directories(${CMSIS_DSP_TARGET} PUBLIC
+                           ${CMSIS_DSP_INC_DIR}
+                           ${CMSIS_CORE_INC_DIR})
+target_include_directories(${CMSIS_DSP_TARGET} PRIVATE
+                           ${CMSIS_DSP_PRI_INC_DIR})
+
+# 5. Add any custom/conditional flags for compilation or linkage
+if (${CMAKE_SYSTEM_PROCESSOR} STREQUAL cortex-m55)
+    target_compile_definitions(${CMSIS_DSP_TARGET} PUBLIC
+        ARM_MATH_MVEI
+        ARM_MATH_DSP
+        ARM_MATH_LOOPUNROLL)
+elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL cortex-m33)
+    # Placeholder, if building with Cortex-M33
+endif()
+
+
+# 6. Provide the library path for the top level CMake to use:
+set(CMSIS_DSP_LIB   "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/lib${CMSIS_DSP_TARGET}.a")
+message(STATUS "CMSIS_DSP_LIB set to be generated here: ${CMSIS_DSP_LIB}")
+
+message(STATUS "CMAKE_CURRENT_SOURCE_DIR: " ${CMAKE_CURRENT_SOURCE_DIR})
+message(STATUS "*******************************************************")
+message(STATUS "Library                                : " ${CMSIS_DSP_TARGET})
+message(STATUS "Build type                             : " ${CMAKE_BUILD_TYPE})
+message(STATUS "TARGET_PLATFORM                        : " ${TARGET_PLATFORM})
+message(STATUS "CMAKE_SYSTEM_PROCESSOR                 : " ${CMAKE_SYSTEM_PROCESSOR})
+message(STATUS "*******************************************************")
diff --git a/scripts/cmake/native-sources.cmake b/scripts/cmake/native-sources.cmake
new file mode 100644
index 0000000..743e075
--- /dev/null
+++ b/scripts/cmake/native-sources.cmake
@@ -0,0 +1,58 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+# Set the install prefix
+set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build_native)
+set(PLAT_HAL ${CMAKE_CURRENT_SOURCE_DIR}/source/application/hal/platforms/native)
+
+if (ETHOS_U55_ENABLED)
+    message(WARNING "EthosU can't be enabled for native builds."
+                    "Use -DETHOS_U55_ENABLED=0 flag for this target platform."
+                    "Overriding, disabling use of EthosU...")
+    set(ETHOS_U55_ENABLED OFF)
+endif()
+
+if (DEFINED LOG_LEVEL)
+    message(STATUS "Setting log level to ${LOG_LEVEL}")
+    set(LOG_FLAG "-DLOG_LEVEL=${LOG_LEVEL}")
+endif()
+
+set(TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME  "libtensorflow-microlite.a")
+set(TENSORFLOW_LITE_MICRO_FLAGS "-DTF_LITE_STATIC_MEMORY -DACTIVATION_BUF_SRAM_SZ=0")
+
+set(CMAKE_C_FLAGS
+        "${WARNING_FLAGS} ${SPECIAL_OPTS} ${PLATFORM_FLAGS}\
+        ${PROFILING_OPT} ${TF_FLAG} ${LOG_FLAG} ${TENSORFLOW_LITE_MICRO_FLAGS}"
+        CACHE INTERNAL "")
+set(CMAKE_CXX_FLAGS
+        "${WARNING_FLAGS} ${SPECIAL_OPTS} ${SPECIAL_OPTS_CXX}\
+        ${PLATFORM_FLAGS} ${PROFILING_OPT} ${TF_FLAG} ${LOG_FLAG}\
+        ${TENSORFLOW_LITE_MICRO_FLAGS}"
+        CACHE INTERNAL "")
+
+# Include directories:
+set(PLAT_INCLUDE_DIRS
+    ${PLAT_HAL}/utils/include
+    ${PLAT_HAL}/images/include
+    ${PLAT_HAL}/data_presentation/log/include
+    ${PLAT_HAL}/timer/include
+    )
+
+# Source files
+file(GLOB_RECURSE SRC_PLAT_HAL
+    "${PLAT_HAL}/**/*.c"
+    "${PLAT_HAL}/**/*.cc"
+    )
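+
+# These variables are consumed by the top-level build scripts; a minimal
+# sketch of how (the target name "my_app" is illustrative):
+#
+#   include_directories(${PLAT_INCLUDE_DIRS})
+#   target_sources(my_app PRIVATE ${SRC_PLAT_HAL})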
diff --git a/scripts/cmake/native-toolchain.cmake b/scripts/cmake/native-toolchain.cmake
new file mode 100644
index 0000000..2e28cd4
--- /dev/null
+++ b/scripts/cmake/native-toolchain.cmake
@@ -0,0 +1,40 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+set(CMAKE_CXX_COMPILER          g++)
+set(CMAKE_C_COMPILER            gcc)
+set(CMAKE_C_LINKER_PREFERENCE   gcc)
+set(CMAKE_CXX_LINKER_PREFERENCE gcc)
+
+set(CMAKE_C_FLAGS_DEBUG         "-DDEBUG -O0 -g")
+set(CMAKE_C_FLAGS_RELEASE       "-DNDEBUG -O3")
+
+set(CMAKE_CXX_FLAGS_DEBUG       "-DDEBUG -O0 -g")
+set(CMAKE_CXX_FLAGS_RELEASE     "-DNDEBUG -O3")
+
+# Platform HAL selection (3 corresponds to the native platform):
+set(PLATFORM_HAL                3)
+set(WARNING_FLAGS               "-Wsign-compare -Wshadow         \
+                                 -Wextra -Wall -Wunused-function \
+                                 -Wmissing-field-initializers    \
+                                 -Wswitch -Wvla -Wunused-parameter")
+set(SPECIAL_OPTS                "-fPIC -pthread")
+set(PLATFORM_FLAGS              "-DPLATFORM_HAL=${PLATFORM_HAL}")
+set(SPECIAL_OPTS_CXX            "-fno-threadsafe-statics")
+set(CMAKE_EXE_LINKER_FLAGS      "-lm -lc -lstdc++ --verbose")
+
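+# Compiler version checks are not enforced for native builds; this no-op keeps
+# call sites in the top-level CMake working unchanged.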
+function(enforce_compiler_version)
+endfunction()
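+
+# Example configure step using this toolchain file (the invocation shown is a
+# sketch; adjust paths to your checkout):
+#
+#   cmake -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/native-toolchain.cmake ..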
diff --git a/scripts/cmake/source_gen_utils.cmake b/scripts/cmake/source_gen_utils.cmake
new file mode 100644
index 0000000..8653016
--- /dev/null
+++ b/scripts/cmake/source_gen_utils.cmake
@@ -0,0 +1,270 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+set(SCRIPTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/scripts)
+
+##############################################################################
+# This function generates C++ files for images located in the directory it is
+# pointed at. NOTE: uses python
+##############################################################################
+function(generate_images_code input_dir src_out hdr_out img_size)
+
+    # Absolute paths for passing into python script
+    get_filename_component(input_dir_abs ${input_dir} ABSOLUTE)
+    get_filename_component(src_out_abs ${src_out} ABSOLUTE)
+    get_filename_component(hdr_out_abs ${hdr_out} ABSOLUTE)
+
+    message(STATUS "Generating image files from ${input_dir_abs}")
+    execute_process(
+        COMMAND ${PYTHON} ${SCRIPTS_DIR}/py/gen_rgb_cpp.py
+        --image_path ${input_dir_abs}
+        --source_folder_path ${src_out_abs}
+        --header_folder_path ${hdr_out_abs}
+        --image_size ${img_size} ${img_size}
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to generate image files.")
+    endif ()
+
+endfunction()
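+
+# Example invocation (paths and the 224x224 image size are illustrative):
+#
+#   generate_images_code("${CMAKE_CURRENT_SOURCE_DIR}/resources/samples"
+#                        "${CMAKE_BINARY_DIR}/generated/images/src"
+#                        "${CMAKE_BINARY_DIR}/generated/images/include"
+#                        224)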
+
+##############################################################################
+# This function generates C++ files for audio files located in the directory it is
+# pointed at. NOTE: uses python
+##############################################################################
+function(generate_audio_code input_dir src_out hdr_out s_rate_opt mono_opt off_opt duration_opt res_type_opt min_sample_opt)
+
+    # Absolute paths for passing into python script
+    get_filename_component(input_dir_abs ${input_dir} ABSOLUTE)
+    get_filename_component(src_out_abs ${src_out} ABSOLUTE)
+    get_filename_component(hdr_out_abs ${hdr_out} ABSOLUTE)
+
+    to_py_bool(mono_opt mono_opt_py)
+
+    message(STATUS "Generating audio files from ${input_dir_abs}")
+    execute_process(
+        COMMAND ${PYTHON} ${SCRIPTS_DIR}/py/gen_audio_cpp.py
+        --audio_path ${input_dir_abs}
+        --source_folder_path ${src_out_abs}
+        --header_folder_path ${hdr_out_abs}
+        --sampling_rate ${s_rate_opt}
+        --mono ${mono_opt_py}
+        --offset ${off_opt}
+        --duration ${duration_opt}
+        --res_type ${res_type_opt}
+        --min_samples ${min_sample_opt}
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to generate audio files.")
+    endif ()
+
+endfunction()
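+
+# Example invocation (all argument values below are illustrative):
+#
+#   generate_audio_code("${CMAKE_CURRENT_SOURCE_DIR}/resources/audio"
+#                       "${CMAKE_BINARY_DIR}/generated/audio/src"
+#                       "${CMAKE_BINARY_DIR}/generated/audio/include"
+#                       16000 ON 0 1 kaiser_fast 16000)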
+
+##############################################################################
+# This function generates default empty input C++ files for applications with
+# no external inputs. Its main user is the inference runner. NOTE: uses python
+##############################################################################
+function(generate_default_input_code hdr_out)
+
+    # Absolute paths for passing into python script
+    get_filename_component(hdr_out_abs ${hdr_out} ABSOLUTE)
+
+    message(STATUS "Generating default input files")
+    execute_process(
+            COMMAND ${PYTHON} ${SCRIPTS_DIR}/py/gen_default_input_cpp.py
+            --header_folder_path ${hdr_out_abs}
+            RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to generate default input .")
+    endif ()
+
+endfunction()
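+
+# Example invocation (the output path is illustrative):
+#
+#   generate_default_input_code("${CMAKE_BINARY_DIR}/generated/include")
+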
+##############################################################################
+# This function generates C++ files for tflite NN model files.
+# @param[in]    MODEL_PATH      path to a tflite file
+# @param[in]    DESTINATION     directory in which the output cc must be
+#                               placed
+# @param[in]    EXPRESSIONS     C++ code expressions to add to the generated file
+# @param[in]    NAMESPACE       model namespace
+# NOTE: Uses python
+##############################################################################
+function(generate_tflite_code)
+
+    set(multiValueArgs EXPRESSIONS NAMESPACE)
+    set(oneValueArgs MODEL_PATH DESTINATION)
+    cmake_parse_arguments(PARSED "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
+
+    # Absolute paths for passing into python script
+    get_filename_component(ABS_MODEL_PATH ${PARSED_MODEL_PATH} ABSOLUTE)
+    get_filename_component(ABS_DESTINATION ${PARSED_DESTINATION} ABSOLUTE)
+
+    if (EXISTS ${ABS_MODEL_PATH})
+        message(STATUS "Using ${ABS_MODEL_PATH}")
+    else ()
+        message(FATAL_ERROR "${ABS_MODEL_PATH} not found!")
+    endif ()
+
+
+    foreach(expression ${PARSED_EXPRESSIONS})
+        set(py_arg_exp ${py_arg_exp} --expression=${expression})
+    endforeach()
+
+    foreach(name ${PARSED_NAMESPACE})
+        set(py_arg_exp ${py_arg_exp} --namespaces=${name})
+    endforeach()
+
+    execute_process(
+        COMMAND ${PYTHON} ${SCRIPTS_DIR}/py/gen_model_cpp.py
+        --tflite_path ${ABS_MODEL_PATH}
+        --output_dir ${ABS_DESTINATION} ${py_arg_exp}
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to generate model files.")
+    endif ()
+endfunction()
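+
+# Example invocation (model path, destination and namespaces are illustrative):
+#
+#   generate_tflite_code(
+#       MODEL_PATH  "${CMAKE_CURRENT_SOURCE_DIR}/resources/model.tflite"
+#       DESTINATION "${CMAKE_BINARY_DIR}/generated/model"
+#       NAMESPACE   "arm" "app")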
+
+
+##############################################################################
+# This function generates a C++ file for a given labels text file.
+# @param[in]    INPUT           Path to the label text file
+# @param[in]    DESTINATION_SRC directory in which the output cc must be
+#                               placed
+# @param[in]    DESTINATION_HDR directory in which the output h file must be
+#                               placed
+# @param[in]    OUTPUT_FILENAME Name to use for the generated output files
+# @param[in]    NAMESPACE       data namespace
+# NOTE: Uses python
+##############################################################################
+function(generate_labels_code)
+
+    set(multiValueArgs NAMESPACE)
+    set(oneValueArgs INPUT DESTINATION_SRC DESTINATION_HDR OUTPUT_FILENAME)
+    cmake_parse_arguments(PARSED "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
+
+    # Absolute paths for passing into python script
+    get_filename_component(input_abs ${PARSED_INPUT} ABSOLUTE)
+    get_filename_component(src_out_abs ${PARSED_DESTINATION_SRC} ABSOLUTE)
+    get_filename_component(hdr_out_abs ${PARSED_DESTINATION_HDR} ABSOLUTE)
+
+    message(STATUS "Generating labels file from ${PARSED_INPUT}")
+    file(REMOVE "${hdr_out_abs}/${PARSED_OUTPUT_FILENAME}.hpp")
+    file(REMOVE "${src_out_abs}/${PARSED_OUTPUT_FILENAME}.cc")
+
+    foreach(name ${PARSED_NAMESPACE})
+        set(py_arg_exp ${py_arg_exp} --namespaces=${name})
+    endforeach()
+
+    message(STATUS "writing to ${hdr_out_abs}/${PARSED_OUTPUT_FILENAME}.hpp and ${src_out_abs}/${PARSED_OUTPUT_FILENAME}.cc")
+    execute_process(
+        COMMAND ${PYTHON} ${SCRIPTS_DIR}/py/gen_labels_cpp.py
+        --labels_file ${input_abs}
+        --source_folder_path ${src_out_abs}
+        --header_folder_path ${hdr_out_abs}
+        --output_file_name ${PARSED_OUTPUT_FILENAME} ${py_arg_exp}
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to generate label files.")
+    endif ()
+endfunction()
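+
+# Example invocation (paths, output name and namespaces are illustrative):
+#
+#   generate_labels_code(
+#       INPUT           "${CMAKE_CURRENT_SOURCE_DIR}/resources/labels.txt"
+#       DESTINATION_SRC "${CMAKE_BINARY_DIR}/generated/labels/src"
+#       DESTINATION_HDR "${CMAKE_BINARY_DIR}/generated/labels/include"
+#       OUTPUT_FILENAME "Labels"
+#       NAMESPACE       "arm" "app")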
+
+
+##############################################################################
+# This function generates C++ data files for tests, using the npy files located
+# in the directory it is pointed at.
+# @param[in]    INPUT_DIR       directory containing the npy files
+# @param[in]    DESTINATION_SRC directory in which the output cc must be
+#                               placed
+# @param[in]    DESTINATION_HDR directory in which the output h file must be
+#                               placed
+# @param[in]    USECASE         name of the sub-usecase
+# @param[in]    NAMESPACE       data name space
+# NOTE: Uses python
+##############################################################################
+function(generate_test_data_code)
+
+    set(multiValueArgs NAMESPACE)
+    set(oneValueArgs INPUT_DIR DESTINATION_SRC DESTINATION_HDR USECASE)
+    cmake_parse_arguments(PARSED "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
+
+    # Absolute paths for passing into python script
+    get_filename_component(input_dir_abs ${PARSED_INPUT_DIR} ABSOLUTE)
+    get_filename_component(src_out_abs ${PARSED_DESTINATION_SRC} ABSOLUTE)
+    get_filename_component(hdr_out_abs ${PARSED_DESTINATION_HDR} ABSOLUTE)
+
+    foreach(name ${PARSED_NAMESPACE})
+        set(py_arg_exp ${py_arg_exp} --namespaces=${name})
+    endforeach()
+
+    message(STATUS "Generating test ifm and ofm files from ${input_dir_abs}")
+    execute_process(
+        COMMAND ${PYTHON} ${SCRIPTS_DIR}/py/gen_test_data_cpp.py
+        --data_folder_path ${input_dir_abs}
+        --source_folder_path ${src_out_abs}
+        --header_folder_path ${hdr_out_abs}
+        --usecase ${PARSED_USECASE}
+        ${py_arg_exp}
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to generate test data files.")
+    endif ()
+
+endfunction()
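+
+# Example invocation (paths, use-case name and namespace are illustrative):
+#
+#   generate_test_data_code(
+#       INPUT_DIR       "${CMAKE_CURRENT_SOURCE_DIR}/resources/test_data"
+#       DESTINATION_SRC "${CMAKE_BINARY_DIR}/generated/test/src"
+#       DESTINATION_HDR "${CMAKE_BINARY_DIR}/generated/test/include"
+#       USECASE         "img_class"
+#       NAMESPACE       "test")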
+
+
+##############################################################################
+# Function to prepare a python virtual environment for running the functions
+# outlined above.
+##############################################################################
+function(setup_source_generator)
+    if (${CMAKE_HOST_WIN32})
+        # On Windows, the Python 3 executable is named python.exe
+        set(PY_EXEC python)
+        set(PYTHON ${CMAKE_BINARY_DIR}/pyenv/Scripts/${PY_EXEC})
+    else()
+        set(PY_EXEC python3)
+        set(PYTHON ${CMAKE_BINARY_DIR}/pyenv/bin/${PY_EXEC})
+    endif()
+    set(PYTHON ${PYTHON} PARENT_SCOPE)
+
+    if (EXISTS ${PYTHON})
+        message(STATUS "Using existing python at ${PYTHON}")
+        return()
+    endif ()
+    message(STATUS "Configuring python environment at ${PYTHON}")
+    execute_process(
+        COMMAND ${PY_EXEC} -m venv ${CMAKE_BINARY_DIR}/pyenv
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to setup python3 environment")
+    endif ()
+
+    execute_process(COMMAND ${PYTHON} -m pip install wheel)
+
+    execute_process(
+        COMMAND ${PYTHON} -m pip install -r ${SCRIPTS_DIR}/py/requirements.txt
+        RESULT_VARIABLE return_code
+    )
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to setup python3 environment")
+    endif ()
+endfunction()
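+
+# Typical usage (sketch): call once before invoking any of the generator
+# functions above; ${PYTHON} then points at the virtual environment's
+# interpreter.
+#
+#   setup_source_generator()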
diff --git a/scripts/cmake/subsystem-profiles/corstone-sse-200.cmake b/scripts/cmake/subsystem-profiles/corstone-sse-200.cmake
new file mode 100644
index 0000000..8e2cd98
--- /dev/null
+++ b/scripts/cmake/subsystem-profiles/corstone-sse-200.cmake
@@ -0,0 +1,255 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# CMake configuration file for peripheral memory map for MPS3 as per SSE-200 design
+###################################################################################################
+#                              Application specific config                                        #
+###################################################################################################
+
+# This parameter is based on the linker/scatter script for SSE-200. Do not change this parameter
+# in isolation.
+set(ACTIVATION_BUF_SRAM_SZ "0x00200000" CACHE STRING "Maximum SRAM size for activation buffers")
+set(DESIGN_NAME            "SSE-200"    CACHE STRING "Design name")
+###################################################################################################
+#                                         Mem sizes                                               #
+###################################################################################################
+set(ITCM_SIZE             "0x00100000" CACHE STRING "ITCM size:         1 MiB")
+set(DTCM_BLK_SIZE         "0x00100000" CACHE STRING "DTCM size:         1 MiB, 4 banks")
+set(BRAM_SIZE             "0x00200000" CACHE STRING "BRAM size:         2 MiB")
+set(QSPI_SRAM_SIZE        "0x00800000" CACHE STRING "QSPI Flash size:   8 MiB")
+set(DDR4_BLK_SIZE         "0x10000000" CACHE STRING "DDR4 block size: 256 MiB")
+
+###################################################################################################
+#                                         Base addresses                                          #
+###################################################################################################
+set(ITCM_BASE_NS          "0x00000000" CACHE STRING "Instruction TCM Non-Secure base address")
+set(BRAM_BASE_NS          "0x01000000" CACHE STRING "CODE SRAM Non-Secure base address")
+set(DTCM0_BASE_NS         "0x20000000" CACHE STRING "Data TCM block 0 Non-Secure base address")
+set(DTCM1_BASE_NS         "0x20100000" CACHE STRING "Data TCM block 1 Non-Secure base address")
+set(DTCM2_BASE_NS         "0x20200000" CACHE STRING "Data TCM block 2 Non-Secure base address")
+set(DTCM3_BASE_NS         "0x20300000" CACHE STRING "Data TCM block 3 Non-Secure base address")
+set(QSPI_SRAM_BASE_NS     "0x28000000" CACHE STRING "QSPI SRAM Non-Secure base address")
+set(DDR4_BLK0_BASE_NS     "0x60000000" CACHE STRING "DDR4 block 0 Non-Secure base address")
+set(DDR4_BLK1_BASE_NS     "0x80000000" CACHE STRING "DDR4 block 1 Non-Secure base address")
+set(DDR4_BLK2_BASE_NS     "0xA0000000" CACHE STRING "DDR4 block 2 Non-Secure base address")
+set(DDR4_BLK3_BASE_NS     "0xC0000000" CACHE STRING "DDR4 block 3 Non-Secure base address")
+
+set(ITCM_BASE_S           "0x10000000" CACHE STRING "Instruction TCM Secure base address")
+set(BRAM_BASE_S           "0x11000000" CACHE STRING "CODE SRAM Secure base address")
+set(DTCM0_BASE_S          "0x30000000" CACHE STRING "Data TCM block 0 Secure base address")
+set(DTCM1_BASE_S          "0x30100000" CACHE STRING "Data TCM block 1 Secure base address")
+set(DTCM2_BASE_S          "0x30200000" CACHE STRING "Data TCM block 2 Secure base address")
+set(DTCM3_BASE_S          "0x30300000" CACHE STRING "Data TCM block 3 Secure base address")
+set(DDR4_BLK0_BASE_S      "0x70000000" CACHE STRING "DDR4 block 0 Secure base address")
+set(DDR4_BLK1_BASE_S      "0x90000000" CACHE STRING "DDR4 block 1 Secure base address")
+set(DDR4_BLK2_BASE_S      "0xB0000000" CACHE STRING "DDR4 block 2 Secure base address")
+set(DDR4_BLK3_BASE_S      "0xD0000000" CACHE STRING "DDR4 block 3 Secure base address")
+
+set(CMSDK_GPIO0_BASE      "0x41100000" CACHE STRING "User GPIO 0 Base Address")
+set(CMSDK_GPIO1_BASE      "0x41101000" CACHE STRING "User GPIO 1 Base Address")
+set(CMSDK_GPIO2_BASE      "0x41102000" CACHE STRING "User GPIO 2 Base Address")
+set(CMSDK_GPIO3_BASE      "0x41103000" CACHE STRING "User GPIO 3 Base Address")
+
+if (ETHOS_U55_ENABLED)
+    set(ETHOS_U55_BASE       "0x41700000" CACHE STRING "Ethos-U55 base address")
+    set(ETHOS_U55_TA0_BASE   "0x41701000" CACHE STRING "Ethos-U55's timing adapter 0 base address")
+    set(ETHOS_U55_TA1_BASE   "0x41701200" CACHE STRING "Ethos-U55's timing adapter 1 base address")
+endif ()
+
+set(MPS3_I2C0_BASE        "0x41200000" CACHE STRING "Touch Screen I2C Base Address ")
+set(MPS3_I2C1_BASE        "0x41201000" CACHE STRING "Audio Interface I2C Base Address ")
+set(MPS3_SSP2_BASE        "0x41202000" CACHE STRING "ADC SPI PL022 Base Address")
+set(MPS3_SSP3_BASE        "0x41203000" CACHE STRING "Shield 0 SPI PL022 Base Address")
+
+set(MPS3_SSP4_BASE        "0x41204000" CACHE STRING "Shield 1 SPI PL022 Base Address")
+set(MPS3_I2C2_BASE        "0x41205000" CACHE STRING "Shield 0 SBCon Base Address ")
+set(MPS3_I2C3_BASE        "0x41206000" CACHE STRING "Shield 1 SBCon Base Address ")
+
+set(MPS3_I2C4_BASE        "0x41207000" CACHE STRING "HDMI I2C SBCon Base Address ")
+set(MPS3_I2C5_BASE        "0x41208000" CACHE STRING "DDR EPROM I2C SBCon Base Address ")
+set(MPS3_SCC_BASE         "0x41300000" CACHE STRING "SCC Base Address ")
+set(MPS3_AAIC_I2S_BASE    "0x41301000" CACHE STRING "Audio Interface I2S Base Address ")
+set(MPS3_FPGAIO_BASE      "0x41302000" CACHE STRING "FPGA IO Base Address ")
+set(CMSDK_UART0_BASE      "0x41303000" CACHE STRING "UART 0 Base Address ")
+set(CMSDK_UART1_BASE      "0x41304000" CACHE STRING "UART 1 Base Address ")
+set(CMSDK_UART2_BASE      "0x41305000" CACHE STRING "UART 2 Base Address ")
+set(CMSDK_UART3_BASE      "0x41306000" CACHE STRING "UART 3 Base Address Shield 0")
+
+set(CMSDK_UART4_BASE      "0x41307000" CACHE STRING "UART 4 Base Address Shield 1")
+set(CMSDK_UART5_BASE      "0x41308000" CACHE STRING "UART 5 Base Address ")
+set(HDMI_AUDIO_BASE       "0x41309000" CACHE STRING "HDMI AUDIO Base Address ")
+set(CLCD_CONFIG_BASE      "0x4130A000" CACHE STRING "CLCD CONFIG Base Address ")
+set(RTC_BASE              "0x4130B000" CACHE STRING "RTC Base address ")
+set(SMSC9220_BASE         "0x41400000" CACHE STRING "Ethernet SMSC9220 Base Address ")
+set(USB_BASE              "0x41500000" CACHE STRING "USB Base Address ")
+
+set(MPS3_eMMC_BASE        "0x41702000" CACHE STRING "User eMMC Base Address")
+set(USER_BASE             "0x41703000" CACHE STRING "User ? Base Address ")
+
+set(QSPI_XIP_BASE         "0x41800000" CACHE STRING "QSPI XIP config Base Address ")
+set(QSPI_WRITE_BASE       "0x41801000" CACHE STRING "QSPI write config Base Address ")
+
+set(SEC_CMSDK_GPIO0_BASE  "0x51100000" CACHE STRING "User GPIO 0 Base Address")
+set(SEC_CMSDK_GPIO1_BASE  "0x51101000" CACHE STRING "User GPIO 1 Base Address")
+set(SEC_CMSDK_GPIO2_BASE  "0x51102000" CACHE STRING "User GPIO 2 Base Address")
+set(SEC_CMSDK_GPIO3_BASE  "0x51103000" CACHE STRING "User GPIO 3 Base Address")
+
+set(SEC_MPS3_I2C0_BASE    "0x51200000" CACHE STRING "Touch Screen I2C Base Address ")
+set(SEC_MPS3_I2C1_BASE    "0x51201000" CACHE STRING "Audio Interface I2C Base Address ")
+set(SEC_MPS3_SSP2_BASE    "0x51202000" CACHE STRING "ADC SPI PL022 Base Address")
+set(SEC_MPS3_SSP3_BASE    "0x51203000" CACHE STRING "Shield 0 SPI PL022 Base Address")
+
+set(SEC_MPS3_SSP4_BASE    "0x51204000" CACHE STRING "Shield 1 SPI PL022 Base Address")
+set(SEC_MPS3_I2C2_BASE    "0x51205000" CACHE STRING "Shield 0 SBCon Base Address ")
+set(SEC_MPS3_I2C3_BASE    "0x51206000" CACHE STRING "Shield 1 SBCon Base Address ")
+
+set(SEC_MPS3_I2C4_BASE    "0x51207000" CACHE STRING "HDMI I2C SBCon Base Address ")
+set(SEC_MPS3_I2C5_BASE    "0x51208000" CACHE STRING "DDR EPROM I2C SBCon Base Address ")
+set(SEC_MPS3_SCC_BASE     "0x51300000" CACHE STRING "SCC Base Address ")
+set(SEC_MPS3_AAIC_I2S_BASE     "0x51301000" CACHE STRING "Audio Interface I2S Base Address ")
+set(SEC_MPS3_FPGAIO_BASE   "0x51302000" CACHE STRING "FPGA IO Base Address ")
+set(SEC_CMSDK_UART0_BASE   "0x51303000" CACHE STRING "UART 0 Base Address ")
+set(SEC_CMSDK_UART1_BASE   "0x51304000" CACHE STRING "UART 1 Base Address ")
+set(SEC_CMSDK_UART2_BASE   "0x51305000" CACHE STRING "UART 2 Base Address ")
+set(SEC_CMSDK_UART3_BASE   "0x51306000" CACHE STRING "UART 3 Base Address Shield 0")
+
+set(SEC_CMSDK_UART4_BASE   "0x51307000" CACHE STRING "UART 4 Base Address Shield 1")
+set(SEC_CMSDK_UART5_BASE   "0x51308000" CACHE STRING "UART 5 Base Address ")
+set(SEC_HDMI_AUDIO_BASE    "0x51309000" CACHE STRING "HDMI AUDIO Base Address ")
+set(SEC_CLCD_CONFIG_BASE   "0x5130A000" CACHE STRING "CLCD CONFIG Base Address ")
+set(SEC_RTC_BASE           "0x5130B000" CACHE STRING "RTC Base address ")
+set(SEC_SMSC9220_BASE      "0x51400000" CACHE STRING "Ethernet SMSC9220 Base Address ")
+set(SEC_USB_BASE           "0x51500000" CACHE STRING "USB Base Address ")
+
+if (ETHOS_U55_ENABLED)
+    set(SEC_ETHOS_U55_BASE        "0x51700000" CACHE STRING "Ethos-U55 base address")
+    set(SEC_ETHOS_U55_TA0_BASE    "0x51701000" CACHE STRING "Ethos-U55's timing adapter 0 base address")
+    set(SEC_ETHOS_U55_TA1_BASE    "0x51701200" CACHE STRING "Ethos-U55's timing adapter 1 base address")
+endif ()
+
+set(SEC_MMC_BASE          "0x51702000" CACHE STRING "User eMMC Base Address")
+set(SEC_USER_BASE         "0x51703000" CACHE STRING "User ? Base Address ")
+
+set(SEC_QSPI_XIP_BASE     "0x51800000" CACHE STRING "QSPI XIP config Base Address ")
+set(SEC_QSPI_WRITE_BASE   "0x51801000" CACHE STRING "QSPI write config Base Address ")
+
+###################################################################################################
+#                                           IRQ numbers                                           #
+###################################################################################################
+set(NONSEC_WATCHDOG_RESET_IRQn    " 0" CACHE STRING " Non-Secure Watchdog Reset Interrupt")
+set(NONSEC_WATCHDOG_IRQn          " 1" CACHE STRING " Non-Secure Watchdog Interrupt         ")
+set(S32K_TIMER_IRQn               " 2" CACHE STRING " S32K Timer Interrupt                  ")
+set(TIMER0_IRQn                   " 3" CACHE STRING " TIMER 0 Interrupt                     ")
+set(TIMER1_IRQn                   " 4" CACHE STRING " TIMER 1 Interrupt                     ")
+set(DUALTIMER_IRQn                " 5" CACHE STRING " Dual Timer Interrupt                  ")
+set(MPC_IRQn                      " 9" CACHE STRING " MPC Combined (Secure) Interrupt       ")
+set(PPC_IRQn                      "10" CACHE STRING " PPC Combined (Secure) Interrupt       ")
+set(MSC_IRQn                      "11" CACHE STRING " MSC Combined (Secure) Interrupt       ")
+set(BRIDGE_ERROR_IRQn             "12" CACHE STRING " Bridge Error Combined (Secure) Interrupt ")
+
+set(UARTRX0_IRQn                  "32" CACHE STRING " UART 0 RX Interrupt                   ")
+set(UARTTX0_IRQn                  "33" CACHE STRING " UART 0 TX Interrupt                   ")
+set(UARTRX1_IRQn                  "34" CACHE STRING " UART 1 RX Interrupt                   ")
+set(UARTTX1_IRQn                  "35" CACHE STRING " UART 1 TX Interrupt                   ")
+set(UARTRX2_IRQn                  "36" CACHE STRING " UART 2 RX Interrupt                   ")
+set(UARTTX2_IRQn                  "37" CACHE STRING " UART 2 TX Interrupt                   ")
+set(UARTRX3_IRQn                  "38" CACHE STRING " UART 3 RX Interrupt                   ")
+set(UARTTX3_IRQn                  "39" CACHE STRING " UART 3 TX Interrupt                   ")
+set(UARTRX4_IRQn                  "40" CACHE STRING " UART 4 RX Interrupt                   ")
+set(UARTTX4_IRQn                  "41" CACHE STRING " UART 4 TX Interrupt                   ")
+set(UART0_IRQn                    "42" CACHE STRING " UART 0 combined Interrupt             ")
+set(UART1_IRQn                    "43" CACHE STRING " UART 1 combined Interrupt             ")
+set(UART2_IRQn                    "44" CACHE STRING " UART 2 combined Interrupt             ")
+set(UART3_IRQn                    "45" CACHE STRING " UART 3 combined Interrupt             ")
+set(UART4_IRQn                    "46" CACHE STRING " UART 4 combined Interrupt             ")
+set(UARTOVF_IRQn                  "47" CACHE STRING " UART 0,1,2,3,4 Overflow Interrupt     ")
+set(ETHERNET_IRQn                 "48" CACHE STRING " Ethernet Interrupt                    ")
+set(I2S_IRQn                      "49" CACHE STRING " I2S Interrupt                         ")
+set(TSC_IRQn                      "50" CACHE STRING " Touch Screen Interrupt                ")
+set(SPI2_IRQn                     "52" CACHE STRING " SPI 2 Interrupt                       ")
+set(SPI3_IRQn                     "53" CACHE STRING " SPI 3 Interrupt                       ")
+set(SPI4_IRQn                     "54" CACHE STRING " SPI 4 Interrupt                       ")
+
+if (ETHOS_U55_ENABLED)
+    if (CPU_CORTEX_M55 EQUAL 1)
+        set(EthosU_IRQn           "55" CACHE STRING " Ethos-U55 Interrupt                   ")
+    elseif (CPU_CORTEX_M33 EQUAL 1)
+        set(EthosU_IRQn           "67" CACHE STRING " Ethos-U55 Interrupt                   ")
+    endif()
+endif ()
+
+set(GPIO0_IRQn                    "68" CACHE STRING " GPIO 0 Combined Interrupt             ")
+set(GPIO1_IRQn                    "69" CACHE STRING " GPIO 1 Combined Interrupt             ")
+set(GPIO2_IRQn                    "70" CACHE STRING " GPIO 2 Combined Interrupt             ")
+set(GPIO3_IRQn                    "71" CACHE STRING " GPIO 3 Combined Interrupt             ")
+
+set(GPIO0_0_IRQn                  "72" CACHE STRING "")
+set(GPIO0_1_IRQn                  "73" CACHE STRING "")
+set(GPIO0_2_IRQn                  "74" CACHE STRING "")
+set(GPIO0_3_IRQn                  "75" CACHE STRING "")
+set(GPIO0_4_IRQn                  "76" CACHE STRING "")
+set(GPIO0_5_IRQn                  "77" CACHE STRING "")
+set(GPIO0_6_IRQn                  "78" CACHE STRING "")
+set(GPIO0_7_IRQn                  "79" CACHE STRING "")
+set(GPIO0_8_IRQn                  "80" CACHE STRING "")
+set(GPIO0_9_IRQn                  "81" CACHE STRING "")
+set(GPIO0_10_IRQn                 "82" CACHE STRING "")
+set(GPIO0_11_IRQn                 "83" CACHE STRING "")
+set(GPIO0_12_IRQn                 "84" CACHE STRING "")
+set(GPIO0_13_IRQn                 "85" CACHE STRING "")
+set(GPIO0_14_IRQn                 "86" CACHE STRING "")
+set(GPIO0_15_IRQn                 "87" CACHE STRING "")
+set(GPIO1_0_IRQn                  "88" CACHE STRING "")
+set(GPIO1_1_IRQn                  "89" CACHE STRING "")
+set(GPIO1_2_IRQn                  "90" CACHE STRING "")
+set(GPIO1_3_IRQn                  "91" CACHE STRING "")
+set(GPIO1_4_IRQn                  "92" CACHE STRING "")
+set(GPIO1_5_IRQn                  "93" CACHE STRING "")
+set(GPIO1_6_IRQn                  "94" CACHE STRING "")
+set(GPIO1_7_IRQn                  "95" CACHE STRING "")
+set(GPIO1_8_IRQn                  "96" CACHE STRING "")
+set(GPIO1_9_IRQn                  "97" CACHE STRING "")
+set(GPIO1_10_IRQn                 "98" CACHE STRING "")
+set(GPIO1_11_IRQn                 "99" CACHE STRING "")
+set(GPIO1_12_IRQn                 "100" CACHE STRING "")
+set(GPIO1_13_IRQn                 "101" CACHE STRING "")
+set(GPIO1_14_IRQn                 "102" CACHE STRING "")
+set(GPIO1_15_IRQn                 "103" CACHE STRING "")
+set(GPIO2_0_IRQn                  "104" CACHE STRING "")
+set(GPIO2_1_IRQn                  "105" CACHE STRING "")
+set(GPIO2_2_IRQn                  "106" CACHE STRING "")
+set(GPIO2_3_IRQn                  "107" CACHE STRING "")
+set(GPIO2_4_IRQn                  "108" CACHE STRING "")
+set(GPIO2_5_IRQn                  "109" CACHE STRING "")
+set(GPIO2_6_IRQn                  "110" CACHE STRING "")
+set(GPIO2_7_IRQn                  "111" CACHE STRING "")
+set(GPIO2_8_IRQn                  "112" CACHE STRING "")
+set(GPIO2_9_IRQn                  "113" CACHE STRING "")
+set(GPIO2_10_IRQn                 "114" CACHE STRING "")
+set(GPIO2_11_IRQn                 "115" CACHE STRING "")
+set(GPIO2_12_IRQn                 "116" CACHE STRING "")
+set(GPIO2_13_IRQn                 "117" CACHE STRING "")
+set(GPIO2_14_IRQn                 "118" CACHE STRING "")
+set(GPIO2_15_IRQn                 "119" CACHE STRING "")
+set(GPIO3_0_IRQn                  "120" CACHE STRING "")
+set(GPIO3_1_IRQn                  "121" CACHE STRING "")
+set(GPIO3_2_IRQn                  "122" CACHE STRING "")
+set(GPIO3_3_IRQn                  "123" CACHE STRING "")
+set(UARTRX5_IRQn                  "124" CACHE STRING "UART 5 RX Interrupt")
+set(UARTTX5_IRQn                  "125" CACHE STRING "UART 5 TX Interrupt")
+set(UART5_IRQn                    "126" CACHE STRING "UART 5 combined Interrupt")
+set(HDCLCD_IRQn                   "127" CACHE STRING "HDCLCD Interrupt")
diff --git a/scripts/cmake/subsystem-profiles/corstone-sse-300.cmake b/scripts/cmake/subsystem-profiles/corstone-sse-300.cmake
new file mode 100644
index 0000000..8b565fe
--- /dev/null
+++ b/scripts/cmake/subsystem-profiles/corstone-sse-300.cmake
@@ -0,0 +1,309 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# CMake configuration file for peripheral memory map for MPS3 as per SSE-300 design
+###################################################################################################
+#                              Application specific config                                        #
+###################################################################################################
+
+# This parameter is based on the linker/scatter script for SSE-300. Do not change this parameter
+# in isolation.
+set(ACTIVATION_BUF_SRAM_SZ "0x00400000" CACHE STRING "Maximum SRAM size for activation buffers")
+set(DESIGN_NAME            "Arm Corstone-300 (SSE-300)" CACHE STRING "Design name")
+
+###################################################################################################
+#                                         Mem sizes                                               #
+###################################################################################################
+set(ITCM_SIZE             "0x00080000" CACHE STRING "ITCM size:       512 kiB")
+set(DTCM_BLK_SIZE         "0x00020000" CACHE STRING "DTCM size:       128 kiB, 4 banks")
+set(BRAM_SIZE             "0x00200000" CACHE STRING "BRAM size:         2 MiB")
+set(ISRAM0_SIZE           "0x00200000" CACHE STRING "ISRAM0 size:       2 MiB")
+set(ISRAM1_SIZE           "0x00200000" CACHE STRING "ISRAM1 size:       2 MiB")
+set(QSPI_SRAM_SIZE        "0x00800000" CACHE STRING "QSPI Flash size:   8 MiB")
+set(DDR4_BLK_SIZE         "0x10000000" CACHE STRING "DDR4 block size: 256 MiB")
+
+###################################################################################################
+#                                Base addresses for memory regions                                #
+###################################################################################################
+set(ITCM_BASE_NS          "0x00000000" CACHE STRING "Instruction TCM Non-Secure base address")
+set(BRAM_BASE_NS          "0x01000000" CACHE STRING "CODE SRAM Non-Secure base address")
+set(DTCM0_BASE_NS         "0x20000000" CACHE STRING "Data TCM block 0 Non-Secure base address")
+set(DTCM1_BASE_NS         "0x20020000" CACHE STRING "Data TCM block 1 Non-Secure base address")
+set(DTCM2_BASE_NS         "0x20040000" CACHE STRING "Data TCM block 2 Non-Secure base address")
+set(DTCM3_BASE_NS         "0x20060000" CACHE STRING "Data TCM block 3 Non-Secure base address")
+set(ISRAM0_BASE_NS        "0x21000000" CACHE STRING "Internal SRAM Area Non-Secure base address")
+set(ISRAM1_BASE_NS        "0x21200000" CACHE STRING "Internal SRAM Area Non-Secure base address")
+set(QSPI_SRAM_BASE_NS     "0x28000000" CACHE STRING "QSPI SRAM Non-Secure base address")
+set(DDR4_BLK0_BASE_NS     "0x60000000" CACHE STRING "DDR4 block 0 Non-Secure base address")
+set(DDR4_BLK1_BASE_NS     "0x80000000" CACHE STRING "DDR4 block 1 Non-Secure base address")
+set(DDR4_BLK2_BASE_NS     "0xA0000000" CACHE STRING "DDR4 block 2 Non-Secure base address")
+set(DDR4_BLK3_BASE_NS     "0xC0000000" CACHE STRING "DDR4 block 3 Non-Secure base address")
+
+set(ITCM_BASE_S           "0x10000000" CACHE STRING "Instruction TCM Secure base address")
+set(BRAM_BASE_S           "0x11000000" CACHE STRING "CODE SRAM Secure base address")
+set(DTCM0_BASE_S          "0x30000000" CACHE STRING "Data TCM block 0 Secure base address")
+set(DTCM1_BASE_S          "0x30020000" CACHE STRING "Data TCM block 1 Secure base address")
+set(DTCM2_BASE_S          "0x30040000" CACHE STRING "Data TCM block 2 Secure base address")
+set(DTCM3_BASE_S          "0x30060000" CACHE STRING "Data TCM block 3 Secure base address")
+set(ISRAM0_BASE_S         "0x31000000" CACHE STRING "Internal SRAM Area Secure base address")
+set(ISRAM1_BASE_S         "0x31200000" CACHE STRING "Internal SRAM Area Secure base address")
+set(DDR4_BLK0_BASE_S      "0x70000000" CACHE STRING "DDR4 block 0 Secure base address")
+set(DDR4_BLK1_BASE_S      "0x90000000" CACHE STRING "DDR4 block 1 Secure base address")
+set(DDR4_BLK2_BASE_S      "0xB0000000" CACHE STRING "DDR4 block 2 Secure base address")
+set(DDR4_BLK3_BASE_S      "0xD0000000" CACHE STRING "DDR4 block 3 Secure base address")
+
+
+###################################################################################################
+#                     Base addresses for peripherals - non secure                                 #
+###################################################################################################
+set(CMSDK_GPIO0_BASE      "0x41100000" CACHE STRING "User GPIO 0 Base Address (4KB)")
+set(CMSDK_GPIO1_BASE      "0x41101000" CACHE STRING "User GPIO 1 Base Address (4KB)")
+set(CMSDK_GPIO2_BASE      "0x41102000" CACHE STRING "User GPIO 2 Base Address (4KB)")
+set(CMSDK_GPIO3_BASE      "0x41103000" CACHE STRING "User GPIO 3 Base Address (4KB)")
+
+set(AHB_USER0_BASE        "0x41104000" CACHE STRING "AHB USER 0 Base Address (4KB)")
+set(AHB_USER1_BASE        "0x41105000" CACHE STRING "AHB USER 1 Base Address (4KB)")
+set(AHB_USER2_BASE        "0x41106000" CACHE STRING "AHB USER 2 Base Address (4KB)")
+set(AHB_USER3_BASE        "0x41107000" CACHE STRING "AHB USER 3 Base Address (4KB)")
+
+set(DMA0_BASE             "0x41200000" CACHE STRING "DMA0 (4KB)")
+set(DMA1_BASE             "0x41201000" CACHE STRING "DMA1 (4KB)")
+set(DMA2_BASE             "0x41202000" CACHE STRING "DMA2 (4KB)")
+set(DMA3_BASE             "0x41203000" CACHE STRING "DMA3 (4KB)")
+
+set(SMSC9220_BASE         "0x41400000" CACHE STRING "Ethernet SMSC9220 Base Address (1MB)")
+set(USB_BASE              "0x41500000" CACHE STRING "USB Base Address (1MB)")
+
+set(USER_APB0_BASE        "0x41700000" CACHE STRING "User APB0")
+set(USER_APB1_BASE        "0x41701000" CACHE STRING "User APB1")
+set(USER_APB2_BASE        "0x41702000" CACHE STRING "User APB2")
+set(USER_APB3_BASE        "0x41703000" CACHE STRING "User APB3")
+
+set(QSPI_XIP_BASE         "0x41800000" CACHE STRING "QSPI XIP config Base Address ")
+set(QSPI_WRITE_BASE       "0x41801000" CACHE STRING "QSPI write config Base Address ")
+
+if (ETHOS_U55_ENABLED)
+    set(ETHOS_U55_BASE        "0x48102000" CACHE STRING "Ethos-U55 base address")
+    set(ETHOS_U55_TA0_BASE    "0x48103000" CACHE STRING "Ethos-U55's timing adapter 0 base address")
+    set(ETHOS_U55_TA1_BASE    "0x48103200" CACHE STRING "Ethos-U55's timing adapter 1 base address")
+endif (ETHOS_U55_ENABLED)
+
+set(MPS3_I2C0_BASE        "0x49200000" CACHE STRING "Touch Screen I2C Base Address ")
+set(MPS3_I2C1_BASE        "0x49201000" CACHE STRING "Audio Interface I2C Base Address ")
+set(MPS3_SSP2_BASE        "0x49202000" CACHE STRING "ADC SPI PL022 Base Address")
+set(MPS3_SSP3_BASE        "0x49203000" CACHE STRING "Shield 0 SPI PL022 Base Address")
+set(MPS3_SSP4_BASE        "0x49204000" CACHE STRING "Shield 1 SPI PL022 Base Address")
+set(MPS3_I2C2_BASE        "0x49205000" CACHE STRING "Shield 0 SBCon Base Address ")
+set(MPS3_I2C3_BASE        "0x49206000" CACHE STRING "Shield 1 SBCon Base Address ")
+
+set(USER_APB_BASE         "0x49207000" CACHE STRING "User APB")
+set(MPS3_I2C5_BASE        "0x49208000" CACHE STRING "DDR EPROM I2C SBCon Base Address ")
+
+set(MPS3_SCC_BASE         "0x49300000" CACHE STRING "SCC Base Address ")
+set(MPS3_AAIC_I2S_BASE    "0x49301000" CACHE STRING "Audio Interface I2S Base Address ")
+set(MPS3_FPGAIO_BASE      "0x49302000" CACHE STRING "FPGA IO Base Address ")
+
+set(CMSDK_UART0_BASE      "0x49303000" CACHE STRING "UART 0 Base Address ")
+set(CMSDK_UART1_BASE      "0x49304000" CACHE STRING "UART 1 Base Address ")
+set(CMSDK_UART2_BASE      "0x49305000" CACHE STRING "UART 2 Base Address ")
+set(CMSDK_UART3_BASE      "0x49306000" CACHE STRING "UART 3 Base Address Shield 0")
+set(CMSDK_UART4_BASE      "0x49307000" CACHE STRING "UART 4 Base Address Shield 1")
+set(CMSDK_UART5_BASE      "0x49308000" CACHE STRING "UART 5 Base Address ")
+
+set(CLCD_CONFIG_BASE      "0x4930A000" CACHE STRING "CLCD CONFIG Base Address ")
+set(RTC_BASE              "0x4930B000" CACHE STRING "RTC Base address ")
+
+###################################################################################################
+#                     Base addresses for peripherals - secure                                     #
+###################################################################################################
+set(SEC_CMSDK_GPIO0_BASE   "0x51100000" CACHE STRING "User GPIO 0 Base Address (4KB)")
+set(SEC_CMSDK_GPIO1_BASE   "0x51101000" CACHE STRING "User GPIO 1 Base Address (4KB)")
+set(SEC_CMSDK_GPIO2_BASE   "0x51102000" CACHE STRING "User GPIO 2 Base Address (4KB)")
+set(SEC_CMSDK_GPIO3_BASE   "0x51103000" CACHE STRING "User GPIO 3 Base Address (4KB)")
+
+set(SEC_AHB_USER0_BASE     "0x51104000" CACHE STRING "AHB USER 0 Base Address (4KB)")
+set(SEC_AHB_USER1_BASE     "0x51105000" CACHE STRING "AHB USER 1 Base Address (4KB)")
+set(SEC_AHB_USER2_BASE     "0x51106000" CACHE STRING "AHB USER 2 Base Address (4KB)")
+set(SEC_AHB_USER3_BASE     "0x51107000" CACHE STRING "AHB USER 3 Base Address (4KB)")
+
+set(SEC_DMA0_BASE          "0x51200000" CACHE STRING "DMA0 (4KB)")
+set(SEC_DMA1_BASE          "0x51201000" CACHE STRING "DMA1 (4KB)")
+set(SEC_DMA2_BASE          "0x51202000" CACHE STRING "DMA2 (4KB)")
+set(SEC_DMA3_BASE          "0x51203000" CACHE STRING "DMA3 (4KB)")
+
+set(SEC_SMSC9220_BASE      "0x51400000" CACHE STRING "Ethernet SMSC9220 Base Address (1MB)")
+set(SEC_USB_BASE           "0x51500000" CACHE STRING "USB Base Address (1MB)")
+
+set(SEC_USER_APB0_BASE     "0x51700000" CACHE STRING "User APB0 Base Address")
+set(SEC_USER_APB1_BASE     "0x51701000" CACHE STRING "User APB1 Base Address")
+set(SEC_USER_APB2_BASE     "0x51702000" CACHE STRING "User APB2 Base Address")
+set(SEC_USER_APB3_BASE     "0x51703000" CACHE STRING "User APB3 Base Address")
+
+set(SEC_QSPI_XIP_BASE      "0x51800000" CACHE STRING "QSPI XIP config Base Address ")
+set(SEC_QSPI_WRITE_BASE    "0x51801000" CACHE STRING "QSPI write config Base Address ")
+
+if (ETHOS_U55_ENABLED)
+    set(SEC_ETHOS_U55_BASE     "0x58102000" CACHE STRING "Ethos-U55 base address")
+    set(SEC_ETHOS_U55_TA0_BASE "0x58103000" CACHE STRING "Ethos-U55's timing adapter 0 base address")
+    set(SEC_ETHOS_U55_TA1_BASE "0x58103200" CACHE STRING "Ethos-U55's timing adapter 1 base address")
+endif (ETHOS_U55_ENABLED)
+
+set(SEC_MPS3_I2C0_BASE     "0x58200000" CACHE STRING "Touch Screen I2C Base Address ")
+set(SEC_MPS3_I2C1_BASE     "0x58201000" CACHE STRING "Audio Interface I2C Base Address ")
+set(SEC_MPS3_SSP2_BASE     "0x58202000" CACHE STRING "ADC SPI PL022 Base Address")
+set(SEC_MPS3_SSP3_BASE     "0x58203000" CACHE STRING "Shield 0 SPI PL022 Base Address")
+set(SEC_MPS3_SSP4_BASE     "0x58204000" CACHE STRING "Shield 1 SPI PL022 Base Address")
+set(SEC_MPS3_I2C2_BASE     "0x58205000" CACHE STRING "Shield 0 SBCon Base Address ")
+set(SEC_MPS3_I2C3_BASE     "0x58206000" CACHE STRING "Shield 1 SBCon Base Address ")
+
+set(SEC_USER_APB_BASE      "0x58207000" CACHE STRING "User APB Base Address")
+set(SEC_MPS3_I2C5_BASE     "0x58208000" CACHE STRING "DDR EPROM I2C SBCon Base Address ")
+
+set(SEC_MPS3_SCC_BASE         "0x58300000" CACHE STRING "SCC Base Address ")
+set(SEC_MPS3_AAIC_I2S_BASE    "0x58301000" CACHE STRING "Audio Interface I2S Base Address ")
+set(SEC_MPS3_FPGAIO_BASE      "0x58302000" CACHE STRING "FPGA IO Base Address ")
+
+set(SEC_CMSDK_UART0_BASE      "0x58303000" CACHE STRING "UART 0 Base Address ")
+set(SEC_CMSDK_UART1_BASE      "0x58304000" CACHE STRING "UART 1 Base Address ")
+set(SEC_CMSDK_UART2_BASE      "0x58305000" CACHE STRING "UART 2 Base Address ")
+set(SEC_CMSDK_UART3_BASE      "0x58306000" CACHE STRING "UART 3 Base Address Shield 0")
+set(SEC_CMSDK_UART4_BASE      "0x58307000" CACHE STRING "UART 4 Base Address Shield 1")
+set(SEC_CMSDK_UART5_BASE      "0x58308000" CACHE STRING "UART 5 Base Address ")
+
+set(SEC_CLCD_CONFIG_BASE      "0x5830A000" CACHE STRING "CLCD CONFIG Base Address ")
+set(SEC_RTC_BASE              "0x5830B000" CACHE STRING "RTC Base address ")
+
+
+###################################################################################################
+#                                           MPCs                                                  #
+###################################################################################################
+set(MPC_ISRAM0_BASE_S     "0x50083000" CACHE STRING "ISRAM0 Memory Protection Controller Secure base address")
+set(MPC_ISRAM1_BASE_S     "0x50084000" CACHE STRING "ISRAM1 Memory Protection Controller Secure base address")
+set(MPC_BRAM_BASE_S       "0x57000000" CACHE STRING "SRAM Memory Protection Controller Secure base address")
+set(MPC_QSPI_BASE_S       "0x57001000" CACHE STRING "QSPI Memory Protection Controller Secure base address")
+set(MPC_DDR4_BASE_S       "0x57002000" CACHE STRING "DDR4 Memory Protection Controller Secure base address")
+
+###################################################################################################
+#                                           IRQ numbers                                           #
+###################################################################################################
+set(NONSEC_WATCHDOG_RESET_IRQn    " 0" CACHE STRING " Non-Secure Watchdog Reset Interrupt")
+set(NONSEC_WATCHDOG_IRQn          " 1" CACHE STRING " Non-Secure Watchdog Interrupt         ")
+set(S32K_TIMER_IRQn               " 2" CACHE STRING " S32K Timer Interrupt                  ")
+set(TIMER0_IRQn                   " 3" CACHE STRING " TIMER 0 Interrupt                     ")
+set(TIMER1_IRQn                   " 4" CACHE STRING " TIMER 1 Interrupt                     ")
+set(DUALTIMER_IRQn                " 5" CACHE STRING " Dual Timer Interrupt                  ")
+set(MPC_IRQn                      " 9" CACHE STRING " MPC Combined (Secure) Interrupt       ")
+set(PPC_IRQn                      "10" CACHE STRING " PPC Combined (Secure) Interrupt       ")
+set(MSC_IRQn                      "11" CACHE STRING " MSC Combined (Secure) Interrupt       ")
+set(BRIDGE_ERROR_IRQn             "12" CACHE STRING " Bridge Error Combined (Secure) Interrupt ")
+set(MGMT_PPU_IRQn                 "14" CACHE STRING " MGMT_PPU" )
+set(SYS_PPU_IRQn                  "15" CACHE STRING " SYS_PPU" )
+set(CPU0_PPU_IRQn                 "16" CACHE STRING " CPU0_PPU" )
+set(DEBUG_PPU_IRQn                "26" CACHE STRING " DEBUG_PPU" )
+set(TIMER3_AON_IRQn               "27" CACHE STRING " TIMER3_AON" )
+set(CPU0CTIIQ0_IRQn               "28" CACHE STRING " CPU0CTIIQ0" )
+set(CPU0CTIIQ01_IRQn              "29" CACHE STRING " CPU0CTIIQ01" )
+
+set(SYS_TSTAMP_COUNTER_IRQn       "32" CACHE STRING " System timestamp counter interrupt" )
+set(UARTRX0_IRQn                  "33" CACHE STRING " UART 0 RX Interrupt                   ")
+set(UARTTX0_IRQn                  "34" CACHE STRING " UART 0 TX Interrupt                   ")
+set(UARTRX1_IRQn                  "35" CACHE STRING " UART 1 RX Interrupt                   ")
+set(UARTTX1_IRQn                  "36" CACHE STRING " UART 1 TX Interrupt                   ")
+set(UARTRX2_IRQn                  "37" CACHE STRING " UART 2 RX Interrupt                   ")
+set(UARTTX2_IRQn                  "38" CACHE STRING " UART 2 TX Interrupt                   ")
+set(UARTRX3_IRQn                  "39" CACHE STRING " UART 3 RX Interrupt                   ")
+set(UARTTX3_IRQn                  "40" CACHE STRING " UART 3 TX Interrupt                   ")
+set(UARTRX4_IRQn                  "41" CACHE STRING " UART 4 RX Interrupt                   ")
+set(UARTTX4_IRQn                  "42" CACHE STRING " UART 4 TX Interrupt                   ")
+set(UART0_IRQn                    "43" CACHE STRING " UART 0 combined Interrupt             ")
+set(UART1_IRQn                    "44" CACHE STRING " UART 1 combined Interrupt             ")
+set(UART2_IRQn                    "45" CACHE STRING " UART 2 combined Interrupt             ")
+set(UART3_IRQn                    "46" CACHE STRING " UART 3 combined Interrupt             ")
+set(UART4_IRQn                    "47" CACHE STRING " UART 4 combined Interrupt             ")
+set(UARTOVF_IRQn                  "48" CACHE STRING " UART 0,1,2,3,4 Overflow Interrupt     ")
+set(ETHERNET_IRQn                 "49" CACHE STRING " Ethernet Interrupt                    ")
+set(I2S_IRQn                      "50" CACHE STRING " Audio I2S Interrupt                   ")
+set(TSC_IRQn                      "51" CACHE STRING " Touch Screen Interrupt                ")
+set(USB_IRQn                      "52" CACHE STRING " USB Interrupt                         ")
+set(SPI2_IRQn                     "53" CACHE STRING " ADC (SPI) Interrupt                   ")
+set(SPI3_IRQn                     "54" CACHE STRING " SPI 3 Interrupt (Shield 0)            ")
+set(SPI4_IRQn                     "55" CACHE STRING " SPI 4 Interrupt (Shield 1)            ")
+
+if (ETHOS_U55_ENABLED)
+    set(EthosU_IRQn               "56" CACHE STRING " Ethos-U55 Interrupt                  ")
+endif ()
+
+set(GPIO0_IRQn                    "69" CACHE STRING " GPIO 0 Combined Interrupt             ")
+set(GPIO1_IRQn                    "70" CACHE STRING " GPIO 1 Combined Interrupt             ")
+set(GPIO2_IRQn                    "71" CACHE STRING " GPIO 2 Combined Interrupt             ")
+set(GPIO3_IRQn                    "72" CACHE STRING " GPIO 3 Combined Interrupt             ")
+
+set(GPIO0_0_IRQn                  "73" CACHE STRING "")
+set(GPIO0_1_IRQn                  "74" CACHE STRING "")
+set(GPIO0_2_IRQn                  "75" CACHE STRING "")
+set(GPIO0_3_IRQn                  "76" CACHE STRING "")
+set(GPIO0_4_IRQn                  "77" CACHE STRING "")
+set(GPIO0_5_IRQn                  "78" CACHE STRING "")
+set(GPIO0_6_IRQn                  "79" CACHE STRING "")
+set(GPIO0_7_IRQn                  "80" CACHE STRING "")
+set(GPIO0_8_IRQn                  "81" CACHE STRING "")
+set(GPIO0_9_IRQn                  "82" CACHE STRING "")
+set(GPIO0_10_IRQn                 "83" CACHE STRING "")
+set(GPIO0_11_IRQn                 "84" CACHE STRING "")
+set(GPIO0_12_IRQn                 "85" CACHE STRING "")
+set(GPIO0_13_IRQn                 "86" CACHE STRING "")
+set(GPIO0_14_IRQn                 "87" CACHE STRING "")
+set(GPIO0_15_IRQn                 "88" CACHE STRING "")
+set(GPIO1_0_IRQn                  "89" CACHE STRING "")
+set(GPIO1_1_IRQn                  "90" CACHE STRING "")
+set(GPIO1_2_IRQn                  "91" CACHE STRING "")
+set(GPIO1_3_IRQn                  "92" CACHE STRING "")
+set(GPIO1_4_IRQn                  "93" CACHE STRING "")
+set(GPIO1_5_IRQn                  "94" CACHE STRING "")
+set(GPIO1_6_IRQn                  "95" CACHE STRING "")
+set(GPIO1_7_IRQn                  "96" CACHE STRING "")
+set(GPIO1_8_IRQn                  "97" CACHE STRING "")
+set(GPIO1_9_IRQn                  "98" CACHE STRING "")
+set(GPIO1_10_IRQn                 "99" CACHE STRING "")
+set(GPIO1_11_IRQn                 "100" CACHE STRING "")
+set(GPIO1_12_IRQn                 "101" CACHE STRING "")
+set(GPIO1_13_IRQn                 "102" CACHE STRING "")
+set(GPIO1_14_IRQn                 "103" CACHE STRING "")
+set(GPIO1_15_IRQn                 "104" CACHE STRING "")
+set(GPIO2_0_IRQn                  "105" CACHE STRING "")
+set(GPIO2_1_IRQn                  "106" CACHE STRING "")
+set(GPIO2_2_IRQn                  "107" CACHE STRING "")
+set(GPIO2_3_IRQn                  "108" CACHE STRING "")
+set(GPIO2_4_IRQn                  "109" CACHE STRING "")
+set(GPIO2_5_IRQn                  "110" CACHE STRING "")
+set(GPIO2_6_IRQn                  "111" CACHE STRING "")
+set(GPIO2_7_IRQn                  "112" CACHE STRING "")
+set(GPIO2_8_IRQn                  "113" CACHE STRING "")
+set(GPIO2_9_IRQn                  "114" CACHE STRING "")
+set(GPIO2_10_IRQn                 "115" CACHE STRING "")
+set(GPIO2_11_IRQn                 "116" CACHE STRING "")
+set(GPIO2_12_IRQn                 "117" CACHE STRING "")
+set(GPIO2_13_IRQn                 "118" CACHE STRING "")
+set(GPIO2_14_IRQn                 "119" CACHE STRING "")
+set(GPIO2_15_IRQn                 "120" CACHE STRING "")
+set(GPIO3_0_IRQn                  "121" CACHE STRING "")
+set(GPIO3_1_IRQn                  "122" CACHE STRING "")
+set(GPIO3_2_IRQn                  "123" CACHE STRING "")
+set(GPIO3_3_IRQn                  "124" CACHE STRING "")
+set(UARTRX5_IRQn                  "125" CACHE STRING "UART 5 RX Interrupt")
+set(UARTTX5_IRQn                  "126" CACHE STRING "UART 5 TX Interrupt")
+set(UART5_IRQn                    "127" CACHE STRING "UART 5 combined Interrupt")
diff --git a/scripts/cmake/subsystem-profiles/simple_platform.cmake b/scripts/cmake/subsystem-profiles/simple_platform.cmake
new file mode 100644
index 0000000..c11706d
--- /dev/null
+++ b/scripts/cmake/subsystem-profiles/simple_platform.cmake
@@ -0,0 +1,51 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+# CMake configuration file for peripheral memory map for a simple platform. This is a stripped-down
+# version of the Arm Corstone-300 platform, with the minimal set of peripherals needed to use the
+# Ethos-U55. However, for ease of integration with the Arm FastModel Tools, it uses PL011 as the
+# UART component instead of the CMSDK UART block used by the MPS3 FPGA and FVP implementations.
+
+###################################################################################################
+#                              Application specific config                                        #
+###################################################################################################
+
+# This parameter is based on the linker/scatter script for internal FVP. Do not change this
+# parameter in isolation.
+set(ACTIVATION_BUF_SRAM_SZ "0x00200000"      CACHE STRING "Maximum SRAM size for activation buffers")
+set(DESIGN_NAME            "Simple platform" CACHE STRING "Design name")
+
+###################################################################################################
+#                                         Base addresses                                          #
+###################################################################################################
+set(PL011_UART0_BASE            "0x49303000" CACHE STRING "PL011 UART 0 Base Address")
+
+if (ETHOS_U55_ENABLED)
+    set(ETHOS_U55_BASE          "0x48102000" CACHE STRING "Ethos-U55 base address")
+    set(ETHOS_U55_TA0_BASE      "0x48103000" CACHE STRING "Ethos-U55's timing adapter 0 base address")
+    set(ETHOS_U55_TA1_BASE      "0x48103200" CACHE STRING "Ethos-U55's timing adapter 1 base address")
+    set(SEC_ETHOS_U55_BASE      "0x58102000" CACHE STRING "Ethos-U55 base address")
+    set(SEC_ETHOS_U55_TA0_BASE  "0x58103000" CACHE STRING "Ethos-U55's timing adapter 0 base address")
+    set(SEC_ETHOS_U55_TA1_BASE  "0x58103200" CACHE STRING "Ethos-U55's timing adapter 1 base address")
+endif ()
+
+###################################################################################################
+#                                           IRQ numbers                                           #
+###################################################################################################
+if (ETHOS_U55_ENABLED)
+    set(EthosU_IRQn             "56"         CACHE STRING "Ethos-U55 Interrupt")
+endif ()
diff --git a/scripts/cmake/ta_config.cmake b/scripts/cmake/ta_config.cmake
new file mode 100644
index 0000000..427884c
--- /dev/null
+++ b/scripts/cmake/ta_config.cmake
@@ -0,0 +1,64 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+#----------------------------------------------------------------------------
+# CMake description file for the Ethos-U55 Timing Adapter settings (single
+# NPU core with two AXIs).
+#----------------------------------------------------------------------------
+
+set(TA0_BASE "${SEC_ETHOS_U55_TA0_BASE}"   CACHE STRING "Timing adapter 0: base-address")
+set(TA1_BASE "${SEC_ETHOS_U55_TA1_BASE}"   CACHE STRING "Timing adapter 1: base-address")
+
+message(STATUS "using TA0_BASE @ ${TA0_BASE}; TA1_BASE @ ${TA1_BASE}.")
+
+# Timing adapter settings for AXI0
+set(TA0_MAXR        "8"        CACHE STRING "6-bit field. Max no. of pending reads. 0=infinite")
+set(TA0_MAXW        "8"        CACHE STRING "6-bit field. Max no. of pending writes. 0=infinite")
+set(TA0_MAXRW       "0"        CACHE STRING "6-bit field. Max no. of pending reads+writes. 0=infinite")
+set(TA0_RLATENCY    "32"       CACHE STRING "12-bit field. Minimum latency (clock cycles) from AVALID to RVALID.")
+set(TA0_WLATENCY    "32"       CACHE STRING "12-bit field. Minimum latency (clock cycles) from WVALID&WLAST to BVALID.")
+set(TA0_PULSE_ON    "3999"     CACHE STRING "No. of cycles addresses let through (0-65535).")
+set(TA0_PULSE_OFF   "1"        CACHE STRING "No. of cycles addresses blocked (0-65535).")
+set(TA0_BWCAP       "4000"     CACHE STRING "16-bit field. Max no. of 64-bit words transferred per pulse cycle. 0=infinite")
+set(TA0_PERFCTRL    "0"        CACHE STRING "6-bit field selecting an event for event counter 0=default")
+set(TA0_PERFCNT     "0"        CACHE STRING "32-bit event counter")
+set(TA0_MODE        "1"        CACHE STRING "Bit 0: 1=enable dynamic clocking to avoid underrun;
+                                             Bit 1: 1=enable random AR reordering (0=default);
+                                             Bit 2: 1=enable random R reordering (0=default);
+                                             Bit 3: 1=enable random B reordering (0=default);
+                                             Bit 11-4: Frequency scale 0=full speed, 255=(1/256) speed")
+set(TA0_HISTBIN     "0"        CACHE STRING "Controls which histogram bin (0-15) is accessed by HISTCNT.")
+set(TA0_HISTCNT     "0"        CACHE STRING "32-bit field. Read/write the selected histogram bin.")
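+
+# As a rough sanity check on the defaults above (illustrative arithmetic only):
+# TA0_PULSE_ON=3999 and TA0_PULSE_OFF=1 give a 4000-cycle pulse window, and
+# TA0_BWCAP=4000 allows up to 4000 x 8 bytes per window, i.e. roughly
+# 8 bytes/cycle of sustained AXI0 bandwidth before the adapter throttles.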
+
+# Timing adapter settings for AXI1
+set(TA1_MAXR        "2"       CACHE STRING "6-bit field. Max no. of pending reads. 0=infinite")
+set(TA1_MAXW        "0"       CACHE STRING "6-bit field. Max no. of pending writes. 0=infinite")
+set(TA1_MAXRW       "0"       CACHE STRING "6-bit field. Max no. of pending reads+writes. 0=infinite")
+set(TA1_RLATENCY    "64"      CACHE STRING "12-bit field. Minimum latency (clock cycles) from AVALID to RVALID.")
+set(TA1_WLATENCY    "0"       CACHE STRING "12-bit field. Minimum latency (clock cycles) from WVALID&WLAST to BVALID.")
+set(TA1_PULSE_ON    "320"     CACHE STRING "No. of cycles addresses let through (0-65535).")
+set(TA1_PULSE_OFF   "80"      CACHE STRING "No. of cycles addresses blocked (0-65535).")
+set(TA1_BWCAP       "50"      CACHE STRING "16-bit field. Max no. of 64-bit words transferred per pulse cycle. 0=infinite")
+set(TA1_PERFCTRL    "0"       CACHE STRING "6-bit field selecting an event for event counter 0=default")
+set(TA1_PERFCNT     "0"       CACHE STRING "32-bit event counter")
+set(TA1_MODE        "1"       CACHE STRING "Bit 0: 1=enable dynamic clocking to avoid underrun;
+                                            Bit 1: 1=enable random AR reordering (0=default);
+                                            Bit 2: 1=enable random R reordering (0=default);
+                                            Bit 3: 1=enable random B reordering (0=default);
+                                            Bit 11-4: Frequency scale 0=full speed, 255=(1/256) speed")
+set(TA1_HISTBIN     "0"       CACHE STRING "Controls which histogram bin (0-15) is accessed by HISTCNT.")
+set(TA1_HISTCNT     "0"       CACHE STRING "32-bit field. Read/write the selected histogram bin.")
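+
+# Likewise for AXI1 (illustrative only): TA1_PULSE_ON=320 + TA1_PULSE_OFF=80
+# give a 400-cycle window, and TA1_BWCAP=50 caps traffic at 50 x 8 bytes per
+# window, i.e. roughly 1 byte/cycle - a much slower interface than AXI0.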
diff --git a/scripts/cmake/templates/mem_regions.h.template b/scripts/cmake/templates/mem_regions.h.template
new file mode 100644
index 0000000..72978ce
--- /dev/null
+++ b/scripts/cmake/templates/mem_regions.h.template
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Auto-generated file
+// ** DO NOT EDIT **
+
+#ifndef MEM_REGION_DEFS_H
+#define MEM_REGION_DEFS_H
+
+#cmakedefine ITCM_SIZE             (@ITCM_SIZE@)     /* ITCM size */
+#cmakedefine DTCM_BLK_SIZE         (@DTCM_BLK_SIZE@)     /* DTCM size, 4 banks of this size available */
+#cmakedefine BRAM_SIZE             (@BRAM_SIZE@)     /* BRAM size */
+#cmakedefine ISRAM0_SIZE           (@ISRAM0_SIZE@)     /* ISRAM0 size */
+#cmakedefine ISRAM1_SIZE           (@ISRAM1_SIZE@)     /* ISRAM1 size */
+#cmakedefine QSPI_SRAM_SIZE        (@QSPI_SRAM_SIZE@)     /* QSPI Flash size */
+#cmakedefine DDR4_BLK_SIZE         (@DDR4_BLK_SIZE@)     /* DDR4 block size */
+
+#cmakedefine ITCM_BASE_NS          (@ITCM_BASE_NS@)     /* Instruction TCM Non-Secure base address */
+#cmakedefine BRAM_BASE_NS          (@BRAM_BASE_NS@)     /* CODE SRAM Non-Secure base address */
+#cmakedefine DTCM0_BASE_NS         (@DTCM0_BASE_NS@)     /* Data TCM block 0 Non-Secure base address */
+#cmakedefine DTCM1_BASE_NS         (@DTCM1_BASE_NS@)     /* Data TCM block 1 Non-Secure base address */
+#cmakedefine DTCM2_BASE_NS         (@DTCM2_BASE_NS@)     /* Data TCM block 2 Non-Secure base address */
+#cmakedefine DTCM3_BASE_NS         (@DTCM3_BASE_NS@)     /* Data TCM block 3 Non-Secure base address */
+#cmakedefine ISRAM0_BASE_NS        (@ISRAM0_BASE_NS@)     /* Internal SRAM Area Non-Secure base address */
+#cmakedefine ISRAM1_BASE_NS        (@ISRAM1_BASE_NS@)     /* Internal SRAM Area Non-Secure base address */
+#cmakedefine QSPI_SRAM_BASE_NS     (@QSPI_SRAM_BASE_NS@)     /* QSPI SRAM Non-Secure base address */
+#cmakedefine DDR4_BLK0_BASE_NS     (@DDR4_BLK0_BASE_NS@)     /* DDR4 block 0 Non-Secure base address */
+#cmakedefine DDR4_BLK1_BASE_NS     (@DDR4_BLK1_BASE_NS@)     /* DDR4 block 1 Non-Secure base address */
+#cmakedefine DDR4_BLK2_BASE_NS     (@DDR4_BLK2_BASE_NS@)     /* DDR4 block 2 Non-Secure base address */
+#cmakedefine DDR4_BLK3_BASE_NS     (@DDR4_BLK3_BASE_NS@)     /* DDR4 block 3 Non-Secure base address */
+
+#cmakedefine ITCM_BASE_S           (@ITCM_BASE_S@)     /* Instruction TCM Secure base address */
+#cmakedefine BRAM_BASE_S           (@BRAM_BASE_S@)     /* CODE SRAM Secure base address */
+#cmakedefine DTCM0_BASE_S          (@DTCM0_BASE_S@)     /* Data TCM block 0 Secure base address */
+#cmakedefine DTCM1_BASE_S          (@DTCM1_BASE_S@)     /* Data TCM block 1 Secure base address */
+#cmakedefine DTCM2_BASE_S          (@DTCM2_BASE_S@)     /* Data TCM block 2 Secure base address */
+#cmakedefine DTCM3_BASE_S          (@DTCM3_BASE_S@)     /* Data TCM block 3 Secure base address */
+#cmakedefine ISRAM0_BASE_S         (@ISRAM0_BASE_S@)     /* Internal SRAM Area Secure base address */
+#cmakedefine ISRAM1_BASE_S         (@ISRAM1_BASE_S@)     /* Internal SRAM Area Secure base address */
+#cmakedefine DDR4_BLK0_BASE_S      (@DDR4_BLK0_BASE_S@)     /* DDR4 block 0 Secure base address */
+#cmakedefine DDR4_BLK1_BASE_S      (@DDR4_BLK1_BASE_S@)     /* DDR4 block 1 Secure base address */
+#cmakedefine DDR4_BLK2_BASE_S      (@DDR4_BLK2_BASE_S@)     /* DDR4 block 2 Secure base address */
+#cmakedefine DDR4_BLK3_BASE_S      (@DDR4_BLK3_BASE_S@)     /* DDR4 block 3 Secure base address */
+
+#endif /*  MEM_REGION_DEFS_H  */
diff --git a/scripts/cmake/templates/peripheral_irqs.h.template b/scripts/cmake/templates/peripheral_irqs.h.template
new file mode 100644
index 0000000..8e8888b
--- /dev/null
+++ b/scripts/cmake/templates/peripheral_irqs.h.template
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Auto-generated file
+// ** DO NOT EDIT **
+
+#ifndef PERIPHERAL_IRQS_H
+#define PERIPHERAL_IRQS_H
+
+/******************************************************************************/
+/*                    Peripheral interrupt numbers                            */
+/******************************************************************************/
+
+/* -------------------  Cortex-M Processor Exceptions Numbers  -------------- */
+/*                 -14 to -1 should be defined by the system header           */
+/* ----------------------  Core Specific Interrupt Numbers  ------------------*/
+#cmakedefine NONSEC_WATCHDOG_RESET_IRQn (@NONSEC_WATCHDOG_RESET_IRQn@)  /* Non-Secure Watchdog Reset Interrupt   */
+#cmakedefine NONSEC_WATCHDOG_IRQn       (@NONSEC_WATCHDOG_IRQn@)  /* Non-Secure Watchdog Interrupt         */
+#cmakedefine S32K_TIMER_IRQn            (@S32K_TIMER_IRQn@)  /* S32K Timer Interrupt                  */
+#cmakedefine TIMER0_IRQn                (@TIMER0_IRQn@)  /* TIMER 0 Interrupt                     */
+#cmakedefine TIMER1_IRQn                (@TIMER1_IRQn@)  /* TIMER 1 Interrupt                     */
+#cmakedefine DUALTIMER_IRQn             (@DUALTIMER_IRQn@)  /* Dual Timer Interrupt                  */
+#cmakedefine MPC_IRQn                   (@MPC_IRQn@)  /* MPC Combined (Secure) Interrupt         */
+#cmakedefine PPC_IRQn                   (@PPC_IRQn@)  /* PPC Combined (Secure) Interrupt         */
+#cmakedefine MSC_IRQn                   (@MSC_IRQn@)  /* MSC Combined (Secure) Interrupt         */
+#cmakedefine BRIDGE_ERROR_IRQn          (@BRIDGE_ERROR_IRQn@)  /* Bridge Error Combined (Secure) Interrupt */
+#cmakedefine MGMT_PPU_IRQn              (@MGMT_PPU_IRQn@)  /* MGMT_PPU */
+#cmakedefine SYS_PPU_IRQn               (@SYS_PPU_IRQn@)  /* SYS_PPU */
+#cmakedefine CPU0_PPU_IRQn              (@CPU0_PPU_IRQn@)  /* CPU0_PPU */
+#cmakedefine DEBUG_PPU_IRQn             (@DEBUG_PPU_IRQn@)  /* DEBUG_PPU */
+#cmakedefine TIMER3_AON_IRQn            (@TIMER3_AON_IRQn@)  /* TIMER3_AON */
+#cmakedefine CPU0CTIIQ0_IRQn            (@CPU0CTIIQ0_IRQn@)  /* CPU0CTIIQ0 */
+#cmakedefine CPU0CTIIQ01_IRQn           (@CPU0CTIIQ01_IRQn@)  /* CPU0CTIIQ01 */
+
+#cmakedefine SYS_TSTAMP_COUNTER_IRQn    (@SYS_TSTAMP_COUNTER_IRQn@)  /* System timestamp counter interrupt */
+
+/* ----------------------  CMSDK Specific Interrupt Numbers  ----------------- */
+#cmakedefine UARTRX0_IRQn               (@UARTRX0_IRQn@)  /* UART 0 RX Interrupt                   */
+#cmakedefine UARTTX0_IRQn               (@UARTTX0_IRQn@)  /* UART 0 TX Interrupt                   */
+#cmakedefine UARTRX1_IRQn               (@UARTRX1_IRQn@)  /* UART 1 RX Interrupt                   */
+#cmakedefine UARTTX1_IRQn               (@UARTTX1_IRQn@)  /* UART 1 TX Interrupt                   */
+#cmakedefine UARTRX2_IRQn               (@UARTRX2_IRQn@)  /* UART 2 RX Interrupt                   */
+#cmakedefine UARTTX2_IRQn               (@UARTTX2_IRQn@)  /* UART 2 TX Interrupt                   */
+#cmakedefine UARTRX3_IRQn               (@UARTRX3_IRQn@)  /* UART 3 RX Interrupt                   */
+#cmakedefine UARTTX3_IRQn               (@UARTTX3_IRQn@)  /* UART 3 TX Interrupt                   */
+#cmakedefine UARTRX4_IRQn               (@UARTRX4_IRQn@)  /* UART 4 RX Interrupt                   */
+#cmakedefine UARTTX4_IRQn               (@UARTTX4_IRQn@)  /* UART 4 TX Interrupt                   */
+#cmakedefine UART0_IRQn                 (@UART0_IRQn@)  /* UART 0 combined Interrupt             */
+#cmakedefine UART1_IRQn                 (@UART1_IRQn@)  /* UART 1 combined Interrupt             */
+#cmakedefine UART2_IRQn                 (@UART2_IRQn@)  /* UART 2 combined Interrupt             */
+#cmakedefine UART3_IRQn                 (@UART3_IRQn@)  /* UART 3 combined Interrupt             */
+#cmakedefine UART4_IRQn                 (@UART4_IRQn@)  /* UART 4 combined Interrupt             */
+#cmakedefine UARTOVF_IRQn               (@UARTOVF_IRQn@)  /* UART 0,1,2,3 and 4 Overflow Interrupt */
+#cmakedefine ETHERNET_IRQn              (@ETHERNET_IRQn@)  /* Ethernet Interrupt                    */
+#cmakedefine I2S_IRQn                   (@I2S_IRQn@)  /* I2S Interrupt                         */
+#cmakedefine TSC_IRQn                   (@TSC_IRQn@)  /* Touch Screen Interrupt                */
+#cmakedefine SPI2_IRQn                  (@SPI2_IRQn@)  /* SPI 2 Interrupt                       */
+#cmakedefine SPI3_IRQn                  (@SPI3_IRQn@)  /* SPI 3 Interrupt                       */
+#cmakedefine SPI4_IRQn                  (@SPI4_IRQn@)  /* SPI 4 Interrupt                       */
+
+#cmakedefine EthosU_IRQn                (@EthosU_IRQn@)   /* Ethos-Uxx Interrupt */
+
+#cmakedefine GPIO0_IRQn                 (@GPIO0_IRQn@)  /* GPIO 0 Combined Interrupt             */
+#cmakedefine GPIO1_IRQn                 (@GPIO1_IRQn@)  /* GPIO 1 Combined Interrupt             */
+#cmakedefine GPIO2_IRQn                 (@GPIO2_IRQn@)  /* GPIO 2 Combined Interrupt             */
+#cmakedefine GPIO3_IRQn                 (@GPIO3_IRQn@)  /* GPIO 3 Combined Interrupt             */
+
+#cmakedefine GPIO0_0_IRQn               (@GPIO0_0_IRQn@)  /* All P0 I/O pins used as irq source    */
+#cmakedefine GPIO0_1_IRQn               (@GPIO0_1_IRQn@)  /* There are 16 pins in total            */
+#cmakedefine GPIO0_2_IRQn               (@GPIO0_2_IRQn@)
+#cmakedefine GPIO0_3_IRQn               (@GPIO0_3_IRQn@)
+#cmakedefine GPIO0_4_IRQn               (@GPIO0_4_IRQn@)
+#cmakedefine GPIO0_5_IRQn               (@GPIO0_5_IRQn@)
+#cmakedefine GPIO0_6_IRQn               (@GPIO0_6_IRQn@)
+#cmakedefine GPIO0_7_IRQn               (@GPIO0_7_IRQn@)
+#cmakedefine GPIO0_8_IRQn               (@GPIO0_8_IRQn@)
+#cmakedefine GPIO0_9_IRQn               (@GPIO0_9_IRQn@)
+#cmakedefine GPIO0_10_IRQn              (@GPIO0_10_IRQn@)
+#cmakedefine GPIO0_11_IRQn              (@GPIO0_11_IRQn@)
+#cmakedefine GPIO0_12_IRQn              (@GPIO0_12_IRQn@)
+#cmakedefine GPIO0_13_IRQn              (@GPIO0_13_IRQn@)
+#cmakedefine GPIO0_14_IRQn              (@GPIO0_14_IRQn@)
+#cmakedefine GPIO0_15_IRQn              (@GPIO0_15_IRQn@)
+#cmakedefine GPIO1_0_IRQn               (@GPIO1_0_IRQn@)  /* All P1 I/O pins used as irq source    */
+#cmakedefine GPIO1_1_IRQn               (@GPIO1_1_IRQn@)  /* There are 16 pins in total            */
+#cmakedefine GPIO1_2_IRQn               (@GPIO1_2_IRQn@)
+#cmakedefine GPIO1_3_IRQn               (@GPIO1_3_IRQn@)
+#cmakedefine GPIO1_4_IRQn               (@GPIO1_4_IRQn@)
+#cmakedefine GPIO1_5_IRQn               (@GPIO1_5_IRQn@)
+#cmakedefine GPIO1_6_IRQn               (@GPIO1_6_IRQn@)
+#cmakedefine GPIO1_7_IRQn               (@GPIO1_7_IRQn@)
+#cmakedefine GPIO1_8_IRQn               (@GPIO1_8_IRQn@)
+#cmakedefine GPIO1_9_IRQn               (@GPIO1_9_IRQn@)
+#cmakedefine GPIO1_10_IRQn              (@GPIO1_10_IRQn@)
+#cmakedefine GPIO1_11_IRQn              (@GPIO1_11_IRQn@)
+#cmakedefine GPIO1_12_IRQn              (@GPIO1_12_IRQn@)
+#cmakedefine GPIO1_13_IRQn              (@GPIO1_13_IRQn@)
+#cmakedefine GPIO1_14_IRQn              (@GPIO1_14_IRQn@)
+#cmakedefine GPIO1_15_IRQn              (@GPIO1_15_IRQn@)
+#cmakedefine GPIO2_0_IRQn               (@GPIO2_0_IRQn@)  /* All P2 I/O pins used as irq source    */
+#cmakedefine GPIO2_1_IRQn               (@GPIO2_1_IRQn@)  /* There are 15 pins in total            */
+#cmakedefine GPIO2_2_IRQn               (@GPIO2_2_IRQn@)
+#cmakedefine GPIO2_3_IRQn               (@GPIO2_3_IRQn@)
+#cmakedefine GPIO2_4_IRQn               (@GPIO2_4_IRQn@)
+#cmakedefine GPIO2_5_IRQn               (@GPIO2_5_IRQn@)
+#cmakedefine GPIO2_6_IRQn               (@GPIO2_6_IRQn@)
+#cmakedefine GPIO2_7_IRQn               (@GPIO2_7_IRQn@)
+#cmakedefine GPIO2_8_IRQn               (@GPIO2_8_IRQn@)
+#cmakedefine GPIO2_9_IRQn               (@GPIO2_9_IRQn@)
+#cmakedefine GPIO2_10_IRQn              (@GPIO2_10_IRQn@)
+#cmakedefine GPIO2_11_IRQn              (@GPIO2_11_IRQn@)
+#cmakedefine GPIO2_12_IRQn              (@GPIO2_12_IRQn@)
+#cmakedefine GPIO2_13_IRQn              (@GPIO2_13_IRQn@)
+#cmakedefine GPIO2_14_IRQn              (@GPIO2_14_IRQn@)
+#cmakedefine GPIO2_15_IRQn              (@GPIO2_15_IRQn@)
+#cmakedefine GPIO3_0_IRQn               (@GPIO3_0_IRQn@)  /* All P3 I/O pins used as irq source    */
+#cmakedefine GPIO3_1_IRQn               (@GPIO3_1_IRQn@)  /* There are 4 pins in total             */
+#cmakedefine GPIO3_2_IRQn               (@GPIO3_2_IRQn@)
+#cmakedefine GPIO3_3_IRQn               (@GPIO3_3_IRQn@)
+#cmakedefine UARTRX5_IRQn               (@UARTRX5_IRQn@)  /* UART 5 RX Interrupt                   */
+#cmakedefine UARTTX5_IRQn               (@UARTTX5_IRQn@)  /* UART 5 TX Interrupt                   */
+#cmakedefine UART5_IRQn                 (@UART5_IRQn@)  /* UART 5 combined Interrupt             */
+#cmakedefine HDCLCD_IRQn                (@HDCLCD_IRQn@)  /* HDCLCD Interrupt                      */
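+
+/* Illustrative use only (assumes a CMSIS device header is in scope): the
+ * generated constants can be passed straight to the standard NVIC helpers,
+ * for example:
+ *
+ *   NVIC_SetPriority((IRQn_Type)EthosU_IRQn, 1);
+ *   NVIC_EnableIRQ((IRQn_Type)EthosU_IRQn);
+ */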
+
+#endif /* PERIPHERAL_IRQS_H */
diff --git a/scripts/cmake/templates/peripheral_memmap.h.template b/scripts/cmake/templates/peripheral_memmap.h.template
new file mode 100644
index 0000000..050d7d7
--- /dev/null
+++ b/scripts/cmake/templates/peripheral_memmap.h.template
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Auto-generated file
+// ** DO NOT EDIT **
+
+#ifndef PERIPHERAL_MEMMAP_H
+#define PERIPHERAL_MEMMAP_H
+
+#cmakedefine DESIGN_NAME              "@DESIGN_NAME@"
+
+/******************************************************************************/
+/*                         Peripheral memory map                              */
+/******************************************************************************/
+
+#cmakedefine CMSDK_GPIO0_BASE         (@CMSDK_GPIO0_BASE@)       /* User GPIO 0 Base Address   */
+#cmakedefine CMSDK_GPIO1_BASE         (@CMSDK_GPIO1_BASE@)       /* User GPIO 1 Base Address   */
+#cmakedefine CMSDK_GPIO2_BASE         (@CMSDK_GPIO2_BASE@)       /* User GPIO 2 Base Address   */
+#cmakedefine CMSDK_GPIO3_BASE         (@CMSDK_GPIO3_BASE@)       /* User GPIO 3 Base Address   */
+
+#cmakedefine AHB_USER0_BASE           (@AHB_USER0_BASE@)       /* AHB USER 0 Base Address (4KB) */
+#cmakedefine AHB_USER1_BASE           (@AHB_USER1_BASE@)       /* AHB USER 1 Base Address (4KB)*/
+#cmakedefine AHB_USER2_BASE           (@AHB_USER2_BASE@)       /* AHB USER 2 Base Address (4KB)*/
+#cmakedefine AHB_USER3_BASE           (@AHB_USER3_BASE@)       /* AHB USER 3 Base Address (4KB)*/
+
+#cmakedefine DMA0_BASE                (@DMA0_BASE@)       /* DMA0 (4KB) */
+#cmakedefine DMA1_BASE                (@DMA1_BASE@)       /* DMA1 (4KB) */
+#cmakedefine DMA2_BASE                (@DMA2_BASE@)       /* DMA2 (4KB) */
+#cmakedefine DMA3_BASE                (@DMA3_BASE@)       /* DMA3 (4KB) */
+
+#cmakedefine USER_APB0_BASE           (@USER_APB0_BASE@)       /* User APB0 */
+#cmakedefine USER_APB1_BASE           (@USER_APB1_BASE@)       /* User APB1 */
+#cmakedefine USER_APB2_BASE           (@USER_APB2_BASE@)       /* User APB2 */
+#cmakedefine USER_APB3_BASE           (@USER_APB3_BASE@)       /* User APB3 */
+
+#cmakedefine MPS3_I2C0_BASE           (@MPS3_I2C0_BASE@)       /* Touch Screen I2C Base Address */
+#cmakedefine MPS3_I2C1_BASE           (@MPS3_I2C1_BASE@)       /* Audio Interface I2C Base Address */
+#cmakedefine MPS3_SSP2_BASE           (@MPS3_SSP2_BASE@)       /* ADC SPI PL022 Base Address   */
+#cmakedefine MPS3_SSP3_BASE           (@MPS3_SSP3_BASE@)       /* Shield 0 SPI PL022 Base Address   */
+
+#cmakedefine MPS3_SSP4_BASE           (@MPS3_SSP4_BASE@)       /* Shield 1 SPI PL022 Base Address   */
+#cmakedefine MPS3_I2C2_BASE           (@MPS3_I2C2_BASE@)       /* Shield 0 SBCon Base Address */
+#cmakedefine MPS3_I2C3_BASE           (@MPS3_I2C3_BASE@)       /* Shield 1 SBCon Base Address */
+
+#cmakedefine USER_APB_BASE            (@USER_APB_BASE@)       /* User APB Base Address */
+#cmakedefine MPS3_I2C4_BASE           (@MPS3_I2C4_BASE@)       /* HDMI I2C SBCon Base Address */
+#cmakedefine MPS3_I2C5_BASE           (@MPS3_I2C5_BASE@)       /* DDR EPROM I2C SBCon Base Address */
+#cmakedefine MPS3_SCC_BASE            (@MPS3_SCC_BASE@)       /* SCC Base Address    */
+#cmakedefine MPS3_AAIC_I2S_BASE       (@MPS3_AAIC_I2S_BASE@)       /* Audio Interface I2S Base Address */
+#cmakedefine MPS3_FPGAIO_BASE         (@MPS3_FPGAIO_BASE@)       /* FPGA IO Base Address */
+#cmakedefine PL011_UART0_BASE         (@PL011_UART0_BASE@)       /* PL011 UART0 Base Address */
+#cmakedefine CMSDK_UART0_BASE         (@CMSDK_UART0_BASE@)       /* UART 0 Base Address */
+#cmakedefine CMSDK_UART1_BASE         (@CMSDK_UART1_BASE@)       /* UART 1 Base Address */
+#cmakedefine CMSDK_UART2_BASE         (@CMSDK_UART2_BASE@)       /* UART 2 Base Address */
+#cmakedefine CMSDK_UART3_BASE         (@CMSDK_UART3_BASE@)       /* UART 3 Base Address Shield 0*/
+
+#cmakedefine ETHOS_U55_BASE           (@ETHOS_U55_BASE@)    /* Ethos-U55 base address*/
+#cmakedefine ETHOS_U55_TA0_BASE       (@ETHOS_U55_TA0_BASE@)    /* Ethos-U55's timing adapter 0 base address */
+#cmakedefine ETHOS_U55_TA1_BASE       (@ETHOS_U55_TA1_BASE@)    /* Ethos-U55's timing adapter 1 base address */
+
+#cmakedefine CMSDK_UART4_BASE         (@CMSDK_UART4_BASE@)       /* UART 4 Base Address Shield 1*/
+#cmakedefine CMSDK_UART5_BASE         (@CMSDK_UART5_BASE@)       /* UART 5 Base Address */
+#cmakedefine HDMI_AUDIO_BASE          (@HDMI_AUDIO_BASE@)       /* HDMI AUDIO Base Address */
+#cmakedefine CLCD_CONFIG_BASE         (@CLCD_CONFIG_BASE@)       /* CLCD CONFIG Base Address */
+#cmakedefine RTC_BASE                 (@RTC_BASE@)       /* RTC Base address */
+#cmakedefine SMSC9220_BASE            (@SMSC9220_BASE@)       /* Ethernet SMSC9220 Base Address */
+#cmakedefine USB_BASE                 (@USB_BASE@)       /* USB Base Address */
+#cmakedefine CMSDK_SDIO_BASE          (@CMSDK_SDIO_BASE@)       /* User SDIO Base Address   */
+#cmakedefine MPS3_CLCD_BASE           (@MPS3_CLCD_BASE@)       /* HDLCD Base Address   */
+#cmakedefine MPS3_eMMC_BASE           (@MPS3_eMMC_BASE@)       /* User eMMC Base Address   */
+#cmakedefine USER_BASE                (@USER_BASE@)       /* User ? Base Address */
+
+#cmakedefine QSPI_XIP_BASE            (@QSPI_XIP_BASE@)       /* QSPI XIP config Base Address */
+#cmakedefine QSPI_WRITE_BASE          (@QSPI_WRITE_BASE@)       /* QSPI write config Base Address */
+
+/******************************************************************************/
+/*                      Secure Peripheral memory map                          */
+/******************************************************************************/
+
+#cmakedefine MPC_ISRAM0_BASE_S        (@MPC_ISRAM0_BASE_S@)       /* ISRAM0 Memory Protection Controller Secure base address */
+#cmakedefine MPC_ISRAM1_BASE_S        (@MPC_ISRAM1_BASE_S@)       /* ISRAM1 Memory Protection Controller Secure base address */
+
+#cmakedefine SEC_CMSDK_GPIO0_BASE     (@SEC_CMSDK_GPIO0_BASE@)       /* User GPIO 0 Base Address   */
+#cmakedefine SEC_CMSDK_GPIO1_BASE     (@SEC_CMSDK_GPIO1_BASE@)       /* User GPIO 0 Base Address   */
+#cmakedefine SEC_CMSDK_GPIO2_BASE     (@SEC_CMSDK_GPIO2_BASE@)       /* User GPIO 0 Base Address   */
+#cmakedefine SEC_CMSDK_GPIO3_BASE     (@SEC_CMSDK_GPIO3_BASE@)       /* User GPIO 0 Base Address   */
+
+#cmakedefine SEC_AHB_USER0_BASE       (@SEC_AHB_USER0_BASE@)       /* AHB USER 0 Base Address (4KB) */
+#cmakedefine SEC_AHB_USER1_BASE       (@SEC_AHB_USER1_BASE@)       /* AHB USER 1 Base Address (4KB)*/
+#cmakedefine SEC_AHB_USER2_BASE       (@SEC_AHB_USER2_BASE@)       /* AHB USER 2 Base Address (4KB)*/
+#cmakedefine SEC_AHB_USER3_BASE       (@SEC_AHB_USER3_BASE@)       /* AHB USER 3 Base Address (4KB)*/
+
+#cmakedefine SEC_DMA0_BASE            (@SEC_DMA0_BASE@)       /* DMA0 (4KB) */
+#cmakedefine SEC_DMA1_BASE            (@SEC_DMA1_BASE@)       /* DMA1 (4KB) */
+#cmakedefine SEC_DMA2_BASE            (@SEC_DMA2_BASE@)       /* DMA2 (4KB) */
+#cmakedefine SEC_DMA3_BASE            (@SEC_DMA3_BASE@)       /* DMA3 (4KB) */
+
+#cmakedefine SEC_USER_APB0_BASE       (@SEC_USER_APB0_BASE@)       /* User APB0 */
+#cmakedefine SEC_USER_APB1_BASE       (@SEC_USER_APB1_BASE@)       /* User APB1 */
+#cmakedefine SEC_USER_APB2_BASE       (@SEC_USER_APB2_BASE@)       /* User APB2 */
+#cmakedefine SEC_USER_APB3_BASE       (@SEC_USER_APB3_BASE@)       /* User APB3 */
+
+#cmakedefine SEC_MPS3_I2C0_BASE       (@SEC_MPS3_I2C0_BASE@)       /* Touch Screen I2C Base Address */
+#cmakedefine SEC_MPS3_I2C1_BASE       (@SEC_MPS3_I2C1_BASE@)       /* Audio Interface I2C Base Address */
+#cmakedefine SEC_MPS3_SSP2_BASE       (@SEC_MPS3_SSP2_BASE@)       /* ADC SPI PL022 Base Address   */
+#cmakedefine SEC_MPS3_SSP3_BASE       (@SEC_MPS3_SSP3_BASE@)       /* Shield 0 SPI PL022 Base Address   */
+
+#cmakedefine SEC_MPS3_SSP4_BASE       (@SEC_MPS3_SSP4_BASE@)       /* Shield 1 SPI PL022 Base Address   */
+#cmakedefine SEC_MPS3_I2C2_BASE       (@SEC_MPS3_I2C2_BASE@)       /* Shield 0 SBCon Base Address */
+#cmakedefine SEC_MPS3_I2C3_BASE       (@SEC_MPS3_I2C3_BASE@)       /* Shield 1 SBCon Base Address */
+
+#cmakedefine SEC_MPS3_I2C4_BASE       (@SEC_MPS3_I2C4_BASE@)       /* HDMI I2C SBCon Base Address */
+#cmakedefine SEC_MPS3_I2C5_BASE       (@SEC_MPS3_I2C5_BASE@)       /* DDR EPROM I2C SBCon Base Address */
+#cmakedefine SEC_MPS3_SCC_BASE        (@SEC_MPS3_SCC_BASE@)       /* SCC Base Address    */
+#cmakedefine SEC_MPS3_AAIC_I2S_BASE   (@SEC_MPS3_AAIC_I2S_BASE@)       /* Audio Interface I2S Base Address */
+#cmakedefine SEC_MPS3_FPGAIO_BASE     (@SEC_MPS3_FPGAIO_BASE@)       /* FPGA IO Base Address */
+#cmakedefine SEC_CMSDK_UART0_BASE     (@SEC_CMSDK_UART0_BASE@)       /* UART 0 Base Address */
+#cmakedefine SEC_CMSDK_UART1_BASE     (@SEC_CMSDK_UART1_BASE@)       /* UART 1 Base Address */
+#cmakedefine SEC_CMSDK_UART2_BASE     (@SEC_CMSDK_UART2_BASE@)       /* UART 2 Base Address */
+#cmakedefine SEC_CMSDK_UART3_BASE     (@SEC_CMSDK_UART3_BASE@)       /* UART 3 Base Address Shield 0*/
+
+#cmakedefine SEC_CMSDK_UART4_BASE     (@SEC_CMSDK_UART4_BASE@)       /* UART 4 Base Address Shield 1*/
+#cmakedefine SEC_CMSDK_UART5_BASE     (@SEC_CMSDK_UART5_BASE@)       /* UART 5 Base Address */
+#cmakedefine SEC_HDMI_AUDIO_BASE      (@SEC_HDMI_AUDIO_BASE@)       /* HDMI AUDIO Base Address */
+#cmakedefine SEC_CLCD_CONFIG_BASE     (@SEC_CLCD_CONFIG_BASE@)       /* CLCD CONFIG Base Address */
+#cmakedefine SEC_RTC_BASE             (@SEC_RTC_BASE@)       /* RTC Base address */
+#cmakedefine SEC_SMSC9220_BASE        (@SEC_SMSC9220_BASE@)       /* Ethernet SMSC9220 Base Address */
+#cmakedefine SEC_USB_BASE             (@SEC_USB_BASE@)       /* USB Base Address */
+
+#cmakedefine SEC_ETHOS_U55_BASE       (@SEC_ETHOS_U55_BASE@)   /* Ethos-U55 base address*/
+#cmakedefine SEC_ETHOS_U55_TA0_BASE   (@SEC_ETHOS_U55_TA0_BASE@)   /* Ethos-U55's timing adapter 0 base address */
+#cmakedefine SEC_ETHOS_U55_TA1_BASE   (@SEC_ETHOS_U55_TA1_BASE@)   /* Ethos-U55's timing adapter 1 base address */
+
+#cmakedefine SEC_USER_BASE            (@SEC_USER_BASE@)       /* User ? Base Address */
+
+#cmakedefine SEC_QSPI_XIP_BASE        (@SEC_QSPI_XIP_BASE@)       /* QSPI XIP config Base Address */
+#cmakedefine SEC_QSPI_WRITE_BASE      (@SEC_QSPI_WRITE_BASE@)       /* QSPI write config Base Address */
+
+/******************************************************************************/
+/*                                  MPCs                                      */
+/******************************************************************************/
+
+#cmakedefine MPC_ISRAM0_BASE_S        (@MPC_ISRAM0_BASE_S@)       /* Internal SRAM 0 MPC */
+#cmakedefine MPC_ISRAM1_BASE_S        (@MPC_ISRAM1_BASE_S@)       /* Internal SRAM 1 MPC */
+#cmakedefine MPC_BRAM_BASE_S          (@MPC_BRAM_BASE_S@)       /* SRAM Memory Protection Controller Secure base address */
+#cmakedefine MPC_QSPI_BASE_S          (@MPC_QSPI_BASE_S@)       /* QSPI Memory Protection Controller Secure base address */
+#cmakedefine MPC_DDR4_BASE_S          (@MPC_DDR4_BASE_S@)       /* DDR4 Memory Protection Controller Secure base address */
+
+#endif /* PERIPHERAL_MEMMAP_H */
diff --git a/scripts/cmake/templates/timing_adapter_settings.template b/scripts/cmake/templates/timing_adapter_settings.template
new file mode 100644
index 0000000..d5e202a
--- /dev/null
+++ b/scripts/cmake/templates/timing_adapter_settings.template
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Auto-generated file
+// ** DO NOT EDIT **
+
+#ifndef TIMING_ADAPTER_SETTINGS_H
+#define TIMING_ADAPTER_SETTINGS_H
+
+#cmakedefine TA0_BASE       (@TA0_BASE@)
+#cmakedefine TA1_BASE       (@TA1_BASE@)
+
+/* Timing adapter settings for AXI0 */
+#if defined(TA0_BASE)
+
+#define TA0_MAXR           (@TA0_MAXR@)
+#define TA0_MAXW           (@TA0_MAXW@)
+#define TA0_MAXRW          (@TA0_MAXRW@)
+#define TA0_RLATENCY       (@TA0_RLATENCY@)
+#define TA0_WLATENCY       (@TA0_WLATENCY@)
+#define TA0_PULSE_ON       (@TA0_PULSE_ON@)
+#define TA0_PULSE_OFF      (@TA0_PULSE_OFF@)
+#define TA0_BWCAP          (@TA0_BWCAP@)
+#define TA0_PERFCTRL       (@TA0_PERFCTRL@)
+#define TA0_PERFCNT        (@TA0_PERFCNT@)
+#define TA0_MODE           (@TA0_MODE@)
+#define TA0_HISTBIN        (@TA0_HISTBIN@)
+#define TA0_HISTCNT        (@TA0_HISTCNT@)
+
+#endif /* defined(TA0_BASE) */
+
+/* Timing adapter settings for AXI1 */
+#if defined(TA1_BASE)
+
+#define TA1_MAXR           (@TA1_MAXR@)
+#define TA1_MAXW           (@TA1_MAXW@)
+#define TA1_MAXRW          (@TA1_MAXRW@)
+#define TA1_RLATENCY       (@TA1_RLATENCY@)
+#define TA1_WLATENCY       (@TA1_WLATENCY@)
+#define TA1_PULSE_ON       (@TA1_PULSE_ON@)
+#define TA1_PULSE_OFF      (@TA1_PULSE_OFF@)
+#define TA1_BWCAP          (@TA1_BWCAP@)
+#define TA1_PERFCTRL       (@TA1_PERFCTRL@)
+#define TA1_PERFCNT        (@TA1_PERFCNT@)
+#define TA1_MODE           (@TA1_MODE@)
+#define TA1_HISTBIN        (@TA1_HISTBIN@)
+#define TA1_HISTCNT        (@TA1_HISTCNT@)
+
+#endif /* defined(TA1_BASE) */
+
+#endif /* TIMING_ADAPTER_SETTINGS_H */
\ No newline at end of file
diff --git a/scripts/cmake/tensorflow.cmake b/scripts/cmake/tensorflow.cmake
new file mode 100644
index 0000000..1123c7f
--- /dev/null
+++ b/scripts/cmake/tensorflow.cmake
@@ -0,0 +1,130 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+include(ProcessorCount)
+ProcessorCount(J)
+
+if (CMAKE_BUILD_TYPE STREQUAL Debug)
+    set(TENSORFLOW_LITE_MICRO_DEFAULT_BUILD_TYPE "debug")
+    set(TENSORFLOW_LITE_MICRO_OPTIMIZATION_LEVEL "-O0")
+elseif (CMAKE_BUILD_TYPE STREQUAL Release)
+    set(TENSORFLOW_LITE_MICRO_DEFAULT_BUILD_TYPE "release")
+    set(TENSORFLOW_LITE_MICRO_OPTIMIZATION_LEVEL "-O3")
+elseif(CMAKE_BUILD_TYPE STREQUAL RelWithDebInfo)
+    set(TENSORFLOW_LITE_MICRO_DEFAULT_BUILD_TYPE "release_with_logs")
+    # No override for optimisation level; we rely on the default
+    # optimisation applied by the TensorFlow Lite Micro build here.
+elseif (NOT DEFINED TENSORFLOW_LITE_MICRO_BUILD_TYPE)
+    message(WARNING     "TENSORFLOW_LITE_MICRO_BUILD_TYPE is not set.")
+    message(FATAL_ERROR "Build type ${CMAKE_BUILD_TYPE} has no corresponding "
+                        "default for the TensorFlow Lite Micro build type")
+endif()
+
+USER_OPTION(TENSORFLOW_LITE_MICRO_BUILD_TYPE "TensorFlow Lite Micro build type (release/debug etc.)"
+    ${TENSORFLOW_LITE_MICRO_DEFAULT_BUILD_TYPE}
+    STRING)
+
+USER_OPTION(TENSORFLOW_LITE_MICRO_CLEAN_DOWNLOADS "Select if TPIP downloads should be cleaned before each build."
+    OFF
+    BOOL)
+
+USER_OPTION(TENSORFLOW_LITE_MICRO_CLEAN_BUILD "Select if the clean target should be added to the list of build targets"
+    ON
+    BOOL)
+
+if (CMAKE_CXX_COMPILER_ID STREQUAL "ARMClang")
+    set(TENSORFLOW_LITE_MICRO_TOOLCHAIN "armclang")
+elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+    set(TENSORFLOW_LITE_MICRO_TOOLCHAIN "gcc")
+else ()
+    message(FATAL_ERROR "No compiler ID is set")
+endif()
+
+get_filename_component(TENSORFLOW_LITE_MICRO_TARGET_TOOLCHAIN_ROOT ${CMAKE_C_COMPILER} DIRECTORY)
+set(TENSORFLOW_LITE_MICRO_TARGET_TOOLCHAIN_ROOT "${TENSORFLOW_LITE_MICRO_TARGET_TOOLCHAIN_ROOT}/")
+
+set(TENSORFLOW_LITE_MICRO_PATH "${TENSORFLOW_SRC_PATH}/tensorflow/lite/micro")
+set(TENSORFLOW_LITE_MICRO_GENDIR ${CMAKE_CURRENT_BINARY_DIR}/tensorflow/)
+
+set(CMSIS_DSP_MAKEFILE_INC ${CMAKE_CURRENT_SOURCE_DIR}/scripts/make/cmsis_dsp.inc)
+set(ETHOS_EVAL_TARGET_MAKEFILE_INC ${CMAKE_CURRENT_SOURCE_DIR}/scripts/make/cortex_m_ethos_eval_makefile.inc)
+
+if (TARGET_PLATFORM STREQUAL native)
+    set(TENSORFLOW_LITE_MICRO_TARGET "linux")
+    set(TENSORFLOW_LITE_MICRO_TARGET_ARCH x86_64)
+else()
+    set(TENSORFLOW_LITE_MICRO_TARGET "cortex_m_ethos_eval")
+    set(TENSORFLOW_LITE_MICRO_TARGET_ARCH ${CMAKE_SYSTEM_PROCESSOR}${CPU_FEATURES})
+    if(ETHOS_U55_ENABLED)
+        # Arm Ethos-U55 NPU is the co-processor for the ML workload:
+        set(TENSORFLOW_LITE_MICRO_CO_PROCESSOR  "ethos_u")
+    endif()
+
+    set(TENSORFLOW_LITE_MICRO_OPTIMIZED_KERNEL  "cmsis_nn")
+
+    # Copy over the target helper (cortex_m_ethos_eval)
+    file(COPY ${ETHOS_EVAL_TARGET_MAKEFILE_INC}
+        DESTINATION ${TENSORFLOW_LITE_MICRO_PATH}/tools/make/targets/)
+endif()
+
+if (TENSORFLOW_LITE_MICRO_CLEAN_DOWNLOADS)
+    list(APPEND MAKE_TARGETS_LIST "clean_downloads")
+endif()
+
+if (TENSORFLOW_LITE_MICRO_CLEAN_BUILD)
+    list(APPEND MAKE_TARGETS_LIST "clean")
+endif()
+
+# Primary target
+list(APPEND MAKE_TARGETS_LIST "microlite")
+message(STATUS "TensorFlow Lite Micro build to be called for these targets: ${MAKE_TARGETS_LIST}")
+
+# Commands and targets
+add_custom_target(tensorflow_build ALL
+
+    # Command to build the TensorFlow Lite Micro library
+    COMMAND make -j${J} -f ${TENSORFLOW_LITE_MICRO_PATH}/tools/make/Makefile ${MAKE_TARGETS_LIST}
+        TARGET_TOOLCHAIN_ROOT=${TENSORFLOW_LITE_MICRO_TARGET_TOOLCHAIN_ROOT}
+        TOOLCHAIN=${TENSORFLOW_LITE_MICRO_TOOLCHAIN}
+        GENDIR=${TENSORFLOW_LITE_MICRO_GENDIR}
+        TARGET=${TENSORFLOW_LITE_MICRO_TARGET}
+        TARGET_ARCH=${TENSORFLOW_LITE_MICRO_TARGET_ARCH}
+        BUILD_TYPE=${TENSORFLOW_LITE_MICRO_BUILD_TYPE}
+        ETHOSU_DRIVER_PATH=${ETHOS_U55_DRIVER_SRC_PATH}
+        CMSIS_PATH=${CMSIS_SRC_PATH}
+
+        # Conditional arguments
+        $<$<BOOL:${ARMCLANG_DEBUG_DWARF_LEVEL}>:ARMCLANG_DEBUG_DWARF_LEVEL=${ARMCLANG_DEBUG_DWARF_LEVEL}>
+        $<$<BOOL:${TENSORFLOW_LITE_MICRO_OPTIMIZATION_LEVEL}>:OPTIMIZATION_LEVEL=${TENSORFLOW_LITE_MICRO_OPTIMIZATION_LEVEL}>
+        $<$<BOOL:${TENSORFLOW_LITE_MICRO_OPTIMIZED_KERNEL}>:OPTIMIZED_KERNEL_DIR=${TENSORFLOW_LITE_MICRO_OPTIMIZED_KERNEL}>
+        $<$<BOOL:${TENSORFLOW_LITE_MICRO_CO_PROCESSOR}>:CO_PROCESSOR=${TENSORFLOW_LITE_MICRO_CO_PROCESSOR}>
+
+    # Command to copy over the generated library to the local build tree.
+    COMMAND ${CMAKE_COMMAND} -E copy  ${TENSORFLOW_LITE_MICRO_GENDIR}/lib/${TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME}
+            ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME}
+
+    COMMENT "Building TensorFlow Lite Micro library..."
+
+    BYPRODUCTS ${TENSORFLOW_SRC_PATH}/tensorflow/tensorflow/lite/micro/tools/make/downloads
+                ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME}
+                ${TENSORFLOW_LITE_MICRO_GENDIR}/lib/${TENSORFLOW_LITE_MICRO_PLATFORM_LIB_NAME}
+
+    WORKING_DIRECTORY ${TENSORFLOW_SRC_PATH})
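+
+# For reference, with the gcc toolchain on a Cortex-M55 configuration the
+# command above expands to something like (illustrative values only):
+#   make -j<N> -f <tflm>/tools/make/Makefile clean microlite TOOLCHAIN=gcc \
+#        TARGET=cortex_m_ethos_eval TARGET_ARCH=cortex-m55 BUILD_TYPE=release \
+#        OPTIMIZED_KERNEL_DIR=cmsis_nn CO_PROCESSOR=ethos_u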
+
+# Create library
+add_library(tensorflow-lite-micro STATIC IMPORTED)
+add_dependencies(tensorflow-lite-micro tensorflow_build)
diff --git a/scripts/cmake/util_functions.cmake b/scripts/cmake/util_functions.cmake
new file mode 100644
index 0000000..6d76131
--- /dev/null
+++ b/scripts/cmake/util_functions.cmake
@@ -0,0 +1,143 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+##############################################################################
+# Helper function to provide user option and corresponding default value
+##############################################################################
+function(USER_OPTION name description default type)
+
+    if (NOT DEFINED ${name})
+        set(${name} ${default} CACHE ${type} ${description})
+    endif()
+
+    # if it is a path
+    if (${type} STREQUAL PATH)
+
+        # Get the absolute path, relative to the cmake root
+        get_filename_component(ABSPATH "${${name}}" ABSOLUTE BASE_DIR ${CMAKE_SOURCE_DIR})
+
+        # check that this is a directory
+        if (NOT IS_DIRECTORY ${ABSPATH})
+            message(FATAL_ERROR
+                "Invalid directory path. Description: ${description}; Path: ${ABSPATH}")
+        endif()
+
+        set(${name} ${ABSPATH} CACHE ${type} ${description} FORCE)
+
+    # if this is a file path
+    elseif(${type} STREQUAL FILEPATH)
+
+        # Get the absolute path, relative to the cmake root
+        get_filename_component(ABSPATH "${${name}}" ABSOLUTE BASE_DIR ${CMAKE_SOURCE_DIR})
+
+        # check that the file exists:
+        if (NOT EXISTS ${ABSPATH})
+            message(FATAL_ERROR
+                "Invalid file path. Description: ${description}; Path: ${ABSPATH}")
+        endif()
+
+        set(${name} ${ABSPATH} CACHE ${type} ${description} FORCE)
+
+    endif()
+
+    message(STATUS "User option ${name} is set to ${${name}}")
+    LIST(APPEND USER_OPTIONS ${name})
+    set(USER_OPTIONS ${USER_OPTIONS} CACHE INTERNAL "")
+
+endfunction()
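+
+# Example usage (illustrative; the option name and default value are made up):
+#   USER_OPTION(LOG_LEVEL "Application logging level" LOG_LEVEL_INFO STRING)
+# If LOG_LEVEL is already defined (e.g. on the command line), the default is ignored.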
+
+# Function to get the path type for a variable
+# Args:
+#   path_var[in]:           path variable for which the cmake path type is requested
+#   cmake_path_type[out]:   CMake path type. Set to FILEPATH when it is a file
+#                           or PATH when it points to a directory. If the path
+#                           is invalid, this remains empty.
+function(get_path_type path_var cmake_path_type)
+    # Validate path - get absolute value
+    get_filename_component(ABSPATH "${path_var}" ABSOLUTE
+                           BASE_DIR ${CMAKE_SOURCE_DIR})
+
+    if (DEFINED path_var)
+        if (IS_DIRECTORY ${ABSPATH})
+            set(${cmake_path_type} PATH PARENT_SCOPE)
+            message(STATUS "Variable of PATH type")
+        elseif(EXISTS ${ABSPATH})
+            set(${cmake_path_type} FILEPATH PARENT_SCOPE)
+        else()
+            set(${cmake_path_type} "" PARENT_SCOPE)
+        endif()
+    else()
+        set(${cmake_path_type} UNINITIALIZED PARENT_SCOPE)
+    endif()
+
+endfunction()
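+
+# Example (illustrative): sets path_type to PATH, FILEPATH or an empty string
+# depending on what ${SOME_USER_PATH} points at:
+#   get_path_type("${SOME_USER_PATH}" path_type)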
+
+# Function to print all the user options added using the function `USER_OPTION`
+function(print_useroptions)
+    message(STATUS "--------------------------------------------------------------------------------------------------")
+    message(STATUS "Defined build user options:")
+    message(STATUS "")
+    foreach(opt ${USER_OPTIONS})
+        message(STATUS "    ${opt}=${${opt}}")
+    endforeach()
+    message(STATUS "--------------------------------------------------------------------------------------------------")
+endfunction()
+
+function (SUBDIRLIST result curdir)
+    file(GLOB children RELATIVE ${curdir} ${curdir}/*)
+    set(dirlist "")
+    foreach(child ${children})
+        if(IS_DIRECTORY ${curdir}/${child})
+            LIST(APPEND dirlist ${child})
+        endif()
+    endforeach()
+    set(${result} ${dirlist} PARENT_SCOPE)
+endfunction()
+
+function(to_py_bool cmake_bool py_bool)
+    if(${${cmake_bool}})
+        set(${py_bool} True PARENT_SCOPE)
+    else()
+        set(${py_bool} False PARENT_SCOPE)
+    endif()
+endfunction()
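+
+# Example (illustrative; the output variable name is made up): converts a CMake
+# truth value into a Python literal for use in generated scripts:
+#   to_py_bool(ETHOS_U55_ENABLED ETHOS_U55_ENABLED_PY)   # -> True or False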
+
+# Function to download a file from the Arm Model Zoo
+# Arguments:
+#   file_sub_path: sub-path within the Model Zoo repository
+#   download_path: location where this file is to be downloaded (path including filename)
+function(download_file_from_modelzoo file_sub_path download_path)
+
+    set(MODEL_ZOO_REPO      "https://github.com/ARM-software/ML-zoo/raw")
+    set(MODEL_ZOO_VERSION   "68b5fbc77ed28e67b2efc915997ea4477c1d9d5b")
+
+    string(JOIN "/" FILE_URL
+        ${MODEL_ZOO_REPO} ${MODEL_ZOO_VERSION} ${file_sub_path})
+
+    message(STATUS "Downloading ${FILE_URL} to ${download_path}...")
+
+    file(DOWNLOAD ${FILE_URL} ${download_path}
+        STATUS DOWNLOAD_STATE)
+    list(GET DOWNLOAD_STATE 0 RET_VAL)
+
+    if(${RET_VAL})
+        list(GET DOWNLOAD_STATE 1 RET_MSG)
+        message(FATAL_ERROR "Download failed with error code: ${RET_VAL}; "
+                            "Error message: ${RET_MSG}")
+    endif()
+
+endfunction()
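+
+# Example usage (illustrative; the sub-path below is made up):
+#   download_file_from_modelzoo(
+#       "models/some_use_case/some_model/tflite_uint8/some_model.tflite"
+#       ${DOWNLOAD_DEP_DIR}/models/some_model.tflite)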
diff --git a/scripts/make/cortex_m_ethos_eval_makefile.inc b/scripts/make/cortex_m_ethos_eval_makefile.inc
new file mode 100644
index 0000000..dbb460d
--- /dev/null
+++ b/scripts/make/cortex_m_ethos_eval_makefile.inc
@@ -0,0 +1,153 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# Generic Makefile target for ARM Cortex M builds.
+# For more info see: tensorflow/lite/micro/cortex_m_generic/README.md
+ifeq ($(TARGET),$(filter $(TARGET), cortex_m_ethos_eval))
+  FLOAT := soft
+  GCC_TARGET_ARCH := $(TARGET_ARCH)
+
+  ifeq ($(TARGET_ARCH), cortex-m0)
+    CORE=M0
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M0
+
+  else ifeq ($(TARGET_ARCH), cortex-m3)
+    CORE=M3
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M3
+
+  else ifeq ($(TARGET_ARCH), cortex-m33)
+    CORE=M33
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M33
+    TARGET_SPECIFIC_FLAGS += -D__DSP_PRESENT=1 -D__FPU_PRESENT=1 -D__VTOR_PRESENT=1 -D__FPU_USED=1
+    FLOAT=hard
+
+  else ifeq ($(TARGET_ARCH), cortex-m33+nodsp)
+    CORE=M33
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M33.no_dsp.no_fp
+
+  else ifeq ($(TARGET_ARCH), cortex-m4)
+    CORE=M4
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M4.no_fp
+    GCC_TARGET_ARCH := cortex-m4+nofp
+
+  else ifeq ($(TARGET_ARCH), cortex-m4+fp)
+    CORE=M4
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M4
+    TARGET_SPECIFIC_FLAGS += -D__FPU_PRESENT=1
+    FLOAT=hard
+    GCC_TARGET_ARCH := cortex-m4
+
+  else ifeq ($(TARGET_ARCH), cortex-m55)
+    CORE=M55
+    ARM_LDFLAGS := -Wl,--cpu=8.1-M.Main.mve.fp
+    TARGET_SPECIFIC_FLAGS += -D__DSP_PRESENT=1 -D__FPU_PRESENT=1
+    FLOAT=hard
+
+  else ifeq ($(TARGET_ARCH), cortex-m55+nodsp+nofp)
+    CORE=M55
+    ARM_LDFLAGS := -Wl,--cpu=8.1-M.Main.mve.no_dsp.no_fp
+
+  else ifeq ($(TARGET_ARCH), cortex-m55+nofp)
+    CORE=M55
+    ARM_LDFLAGS := -Wl,--cpu=8.1-M.Main.mve.no_fp
+    TARGET_SPECIFIC_FLAGS += -D__DSP_PRESENT=1
+
+  else ifeq ($(TARGET_ARCH), cortex-m7)
+    CORE=M7
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M7.no_fp
+    GCC_TARGET_ARCH := cortex-m7+nofp
+
+  else ifeq ($(TARGET_ARCH), cortex-m7+fp)
+    CORE=M7
+    ARM_LDFLAGS := -Wl,--cpu=Cortex-M7
+    FLOAT=hard
+    GCC_TARGET_ARCH := cortex-m7
+
+  else
+    $(error "TARGET_ARCH=$(TARGET_ARCH) is not supported")
+  endif
+
+  ifneq ($(filter cortex-m55%,$(TARGET_ARCH)),)
+    # -mfloat-abi=soft disables MVE - use softfp instead for M55.
+    ifeq ($(FLOAT),soft)
+      FLOAT=softfp
+    endif
+  endif
+
+  # Toolchain specific flags
+  ifeq ($(TOOLCHAIN), armclang)
+    CXX_TOOL  := armclang
+    CC_TOOL   := armclang
+    AR_TOOL   := armar
+    LD        := armlink
+
+    FLAGS_ARMC = \
+      --target=arm-arm-none-eabi \
+      -mcpu=$(TARGET_ARCH)
+
+    # For debug builds, emit debug symbols in the requested DWARF format
+    ifeq ($(BUILD_TYPE), debug)
+      ifneq ($(ARMCLANG_DEBUG_DWARF_LEVEL),)
+        FLAGS_ARMC += -gdwarf-$(ARMCLANG_DEBUG_DWARF_LEVEL)
+      endif
+    endif
+
+    CXXFLAGS += $(FLAGS_ARMC)
+    CCFLAGS += $(FLAGS_ARMC)
+    LDFLAGS += $(ARM_LDFLAGS)
+
+    # Arm Compiler will not link the Math library (see below), therefore we're filtering it out.
+    # See Fatal error: L6450U: Cannot find library m:
+    # "Arm Compiler is designed to run in a bare metal environment,
+    # and automatically includes implementations of these functions,
+    # and so no such flag is necessary."
+    # https://developer.arm.com/documentation/100891/0611/troubleshooting/general-troubleshooting-advice
+    MICROLITE_LIBS := $(filter-out -lm,$(MICROLITE_LIBS))
+
+  else ifeq ($(TOOLCHAIN), gcc)
+    export PATH := $(MAKEFILE_DIR)/downloads/gcc_embedded/bin/:$(PATH)
+    DOWNLOAD_RESULT := $(shell $(MAKEFILE_DIR)/arm_gcc_download.sh ${MAKEFILE_DIR}/downloads)
+    ifneq ($(DOWNLOAD_RESULT), SUCCESS)
+      $(error Something went wrong with the GCC download: $(DOWNLOAD_RESULT))
+    endif
+
+    TARGET_TOOLCHAIN_PREFIX := arm-none-eabi-
+
+    FLAGS_GCC = -mcpu=$(GCC_TARGET_ARCH) -mfpu=auto
+    CXXFLAGS += $(FLAGS_GCC)
+    CCFLAGS += $(FLAGS_GCC)
+
+  else
+    $(error "TOOLCHAIN=$(TOOLCHAIN) is not supported.")
+  endif
+
+  PLATFORM_FLAGS = \
+    -DTF_LITE_MCU_DEBUG_LOG \
+    -mthumb \
+    -mfloat-abi=$(FLOAT) \
+    -funsigned-char \
+    -mlittle-endian \
+    -Wno-type-limits \
+    -Wno-unused-private-field \
+    -fomit-frame-pointer \
+    -MD \
+    -DCPU_CORTEX_$(CORE)=1 \
+    $(TARGET_SPECIFIC_FLAGS)
+
+  # Common + C/C++ flags
+  CXXFLAGS += $(PLATFORM_FLAGS)
+  CCFLAGS += $(PLATFORM_FLAGS)
+
+endif
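+
+# Example invocation of the TensorFlow Lite Micro build with this target
+# (illustrative; normally this is driven by scripts/cmake/tensorflow.cmake):
+#   make -f tensorflow/lite/micro/tools/make/Makefile microlite \
+#        TARGET=cortex_m_ethos_eval TARGET_ARCH=cortex-m55 TOOLCHAIN=armclang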
diff --git a/scripts/py/gen_audio.py b/scripts/py/gen_audio.py
new file mode 100644
index 0000000..53ed019
--- /dev/null
+++ b/scripts/py/gen_audio.py
@@ -0,0 +1,48 @@
+#!env/bin/python3
+
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+"""
+Utility script to convert an audio clip into the format expected by the evaluation platform.
+"""
+import soundfile as sf
+
+from argparse import ArgumentParser
+from os import path
+
+from gen_utils import AudioUtils
+
+parser = ArgumentParser()
+parser.add_argument("--audio_path", help="Audio file path", required=True)
+parser.add_argument("--output_dir", help="Output directory", required=True)
+parser.add_argument("--sampling_rate", type=int, help="target sampling rate.", default=16000)
+parser.add_argument("--mono", type=bool, help="convert signal to mono.", default=True)
+parser.add_argument("--offset", type=float, help="start reading after this time (in seconds).", default=0)
+parser.add_argument("--duration", type=float, help="only load up to this much audio (in seconds).", default=0)
+parser.add_argument("--res_type", type=AudioUtils.res_data_type, help=f"Resample type: {AudioUtils.res_type_list()}.", default='kaiser_best')
+parser.add_argument("--min_samples", type=int, help="Minimum sample number.", default=16000)
+parser.add_argument("-v", "--verbosity", action="store_true")
+args = parser.parse_args()
+
+def main(args):
+    audio_data, samplerate = AudioUtils.load_resample_audio_clip(args.audio_path,
+                                                args.sampling_rate,
+                                                args.mono,  args.offset,
+                                                args.duration, args.res_type,
+                                                args.min_samples)
+    sf.write(path.join(args.output_dir, path.basename(args.audio_path)), audio_data, samplerate)
+
+if __name__ == '__main__':
+    main(args)
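+
+# Example invocation (illustrative file names):
+#   python gen_audio.py --audio_path ./samples/test.wav --output_dir ./resampled
+# With the defaults above, this writes ./resampled/test.wav resampled to 16 kHz mono.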
diff --git a/scripts/py/gen_audio_cpp.py b/scripts/py/gen_audio_cpp.py
new file mode 100644
index 0000000..54fdb23
--- /dev/null
+++ b/scripts/py/gen_audio_cpp.py
@@ -0,0 +1,153 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""
+Utility script to convert a set of audio clips in a given location into
+corresponding cpp files and a single hpp file referencing the vectors
+from the cpp files.
+"""
+import datetime
+import glob
+import math
+import os
+
+import numpy as np
+from os import path
+from argparse import ArgumentParser
+from jinja2 import Environment, FileSystemLoader
+from gen_utils import AudioUtils
+
+parser = ArgumentParser()
+parser.add_argument("--audio_path", type=str, help="path to audio folder to convert.")
+parser.add_argument("--source_folder_path", type=str, help="path to source folder to be generated.")
+parser.add_argument("--header_folder_path", type=str, help="path to header folder to be generated.")
+parser.add_argument("--sampling_rate", type=int, help="target sampling rate.", default=16000)
+parser.add_argument("--mono", type=bool, help="convert signal to mono.", default=True)
+parser.add_argument("--offset", type=float, help="start reading after this time (in seconds).", default=0)
+parser.add_argument("--duration", type=float, help="only load up to this much audio (in seconds).", default=0)
+parser.add_argument("--res_type", type=AudioUtils.res_data_type, help=f"Resample type: {AudioUtils.res_type_list()}.",
+                    default='kaiser_best')
+parser.add_argument("--min_samples", type=int, help="Minimum sample number.", default=16000)
+parser.add_argument("--license_template", type=str, help="Header template file",
+                    default="header_template.txt")
+parser.add_argument("-v", "--verbosity", action="store_true")
+args = parser.parse_args()
+
+env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
+                  trim_blocks=True,
+                  lstrip_blocks=True)
+
+
+def write_hpp_file(header_filepath, cc_filepath, header_template_file, num_audios, audio_filenames, audio_array_namesizes):
+    print(f"++ Generating {header_filepath}")
+
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 year=datetime.datetime.now().year)
+    env.get_template('AudioClips.hpp.template').stream(common_template_header=hdr,
+                                                       clips_count=num_audios,
+                                                       varname_size=audio_array_namesizes
+                                                       ) \
+        .dump(str(header_filepath))
+
+    print(f"++ Generating {cc_filepath}")
+
+    env.get_template('AudioClips.cc.template').stream(common_template_header=hdr,
+                                                       clips_count=num_audios,
+                                                       var_names=(name for name, _ in audio_array_namesizes),
+                                                       clip_sizes=(size for _, size in audio_array_namesizes),
+                                                       clip_names=audio_filenames) \
+        .dump(str(cc_filepath))
+
+
+def write_individual_audio_cc_file(clip_dirpath, clip_filename,
+                                   cc_filename, header_template_file, array_name,
+                                   sampling_rate_value, mono_value, offset_value, 
+                                   duration_value, res_type_value, min_len):
+    print(f"++ Converting {clip_filename} to {path.basename(cc_filename)}")
+    audio_filepath = path.join(clip_dirpath, clip_filename)
+    clip_data, samplerate = AudioUtils.load_resample_audio_clip(audio_filepath,
+                                                                sampling_rate_value, mono_value,
+                                                                offset_value, duration_value,
+                                                                res_type_value, min_len)
+
+    # Change from [-1, 1] fp32 range to int16 range.
+    clip_data = np.clip((clip_data * (1 << 15)), 
+                        np.iinfo(np.int16).min, 
+                        np.iinfo(np.int16).max).flatten().astype(np.int16)
+
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 file_name=clip_filename,
+                                 year=datetime.datetime.now().year)
+
+    hex_line_generator = (', '.join(map(hex, sub_arr))
+                          for sub_arr in np.array_split(clip_data, math.ceil(len(clip_data)/20)))
+
+    env.get_template('audio.cc.template').stream(common_template_header=hdr,
+                                                 size=len(clip_data),
+                                                 var_name=array_name,
+                                                 audio_data=hex_line_generator) \
+        .dump(str(cc_filename))
+
+    return len(clip_data)
+
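+# A minimal sanity check of the float -> int16 conversion above (illustrative
+# only, not part of the build): a full-scale 1.0 sample clips to 32767,
+# -1.0 maps to -32768, and the generator emits 20 values per line.
+#
+#   x = np.array([-1.0, 0.0, 1.0], dtype=np.float32)
+#   y = np.clip(x * (1 << 15), np.iinfo(np.int16).min,
+#               np.iinfo(np.int16).max).astype(np.int16)
+#   assert y.tolist() == [-32768, 0, 32767]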
+
+def main(args):
+    # Keep the count of the audio files converted
+    audioclip_idx = 0
+    audioclip_filenames = []
+    audioclip_array_names = []
+    header_filename = "InputFiles.hpp"
+    common_cc_filename = "InputFiles.cc"
+    header_filepath = path.join(args.header_folder_path, header_filename)
+    common_cc_filepath = path.join(args.source_folder_path, common_cc_filename)
+
+    if os.path.isdir(args.audio_path):
+        filepaths = sorted(glob.glob(path.join(args.audio_path, '**/*.wav'), recursive=True))
+    elif os.path.isfile(args.audio_path):
+        filepaths = [args.audio_path]
+    else:
+        raise OSError("Directory or file does not exist.")
+
+    for filepath in filepaths:
+        filename = path.basename(filepath)
+        clip_dirpath = path.dirname(filepath)
+        try:
+            audioclip_filenames.append(filename)
+
+            # Save the cc file
+            cc_filename = path.join(args.source_folder_path,
+                                    (filename.rsplit(".")[0]).replace(" ", "_") + ".cc")
+            array_name = "audio" + str(audioclip_idx)
+            array_size = write_individual_audio_cc_file(clip_dirpath, filename, cc_filename, args.license_template, array_name,
+                                                        args.sampling_rate, args.mono, args.offset,
+                                                        args.duration, args.res_type, args.min_samples)
+
+            audioclip_array_names.append((array_name, array_size))
+            # Increment audio index
+            audioclip_idx = audioclip_idx + 1
+        except Exception:
+            if args.verbosity:
+                print(f"Failed to open {filename} as an audio file.")
+
+    write_hpp_file(header_filepath, common_cc_filepath, args.license_template,
+                   audioclip_idx, audioclip_filenames, audioclip_array_names)
+
+
+if __name__ == '__main__':
+    main(args)
diff --git a/scripts/py/gen_default_input_cpp.py b/scripts/py/gen_default_input_cpp.py
new file mode 100644
index 0000000..c091fd1
--- /dev/null
+++ b/scripts/py/gen_default_input_cpp.py
@@ -0,0 +1,53 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""
+Utility script to generate the minimum InputFiles.hpp and cpp files required by an application.
+"""
+import datetime
+import os
+
+from argparse import ArgumentParser
+from jinja2 import Environment, FileSystemLoader
+
+parser = ArgumentParser()
+parser.add_argument("--header_folder_path", type=str, help="path to header folder to be generated.")
+parser.add_argument("--license_template", type=str, help="Header template file",
+                    default="header_template.txt")
+args = parser.parse_args()
+
+env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
+                  trim_blocks=True,
+                  lstrip_blocks=True)
+
+
+def write_hpp_file(header_file_path, header_template_file):
+    print(f"++ Generating {header_file_path}")
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 year=datetime.datetime.now().year)
+    env.get_template('default.hpp.template').stream(common_template_header=hdr) \
+        .dump(str(header_file_path))
+
+
+def main(args):
+    header_filename = "InputFiles.hpp"
+    header_filepath = os.path.join(args.header_folder_path, header_filename)
+    write_hpp_file(header_filepath, args.license_template)
+
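+# The generated stub (see default.hpp.template) defines NUMBER_OF_FILES as 0U
+# and declares get_filename(), so use cases without input data still compile.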
+
+if __name__ == '__main__':
+    main(args)
diff --git a/scripts/py/gen_fpga_mem_map.py b/scripts/py/gen_fpga_mem_map.py
new file mode 100644
index 0000000..6a2d1d2
--- /dev/null
+++ b/scripts/py/gen_fpga_mem_map.py
@@ -0,0 +1,192 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import os
+from argparse import ArgumentParser
+
+"""
+This file is used as part of post build steps to generate 'images.txt' file
+which can be copied over onto the MPS3 board's SD card. The purpose is to
+limit having to manually edit the file based on different load regions that
+the build scatter file might dictate.
+"""
+
+def is_commented(line):
+    return line.startswith(";")
+
+
+def is_load_rom(line):
+    load_region_specifiers = ['LOAD_ROM', 'LD_ROM', 'LOAD_REGION']
+
+    for load_specifier in load_region_specifiers:
+        if line.startswith(load_specifier):
+            return True
+
+    return False
+
+
+class TargetSubsystem:
+
+    def __init__(self, target_subsystem_name: str):
+        """
+        Constructor for target class.
+        Arguments:
+            target_subsystem_name: name of the target subsystem
+        """
+        # Dict with mem map and binary names we expect
+        self.subsystems = {
+            "sse-200": {
+                "mmap_mcc" : {
+                    # FPGA addr |  MCC addr  |
+                    "0x00000000": "0x00000000", # ITCM (NS)
+                    "0x10000000": "0x01000000", # ITCM (S)
+                    "0x20000000": "0x02000000", # DTCM (NS)
+                    "0x30000000": "0x03000000", # DTCM (S)
+                    "0x60000000": "0x08000000"  # DDR (NS)
+                },
+                "bin_names": {
+                    0: "itcm.bin",
+                    1: "dram.bin"
+                }
+            },
+            "sse-300": {
+                "mmap_mcc" : {
+                    # FPGA addr |  MCC addr  |
+                    "0x00000000": "0x00000000", # ITCM (NS)
+                    "0x01000000": "0x02000000", # BRAM or FPGA's data SRAM (NS)
+                    "0x60000000": "0x08000000", # DDR (NS)
+                    "0x70000000": "0x0c000000"  # DDR (S)
+                },
+                "bin_names": {
+                    0: "itcm.bin",
+                    1: "dram.bin"
+                }
+            }
+        }
+
+        self.name = target_subsystem_name
+
+
+    def is_supported(self, target_subsystem: str) -> bool:
+        """
+        Checks if the target subsystem exists within systems
+        supported by this script
+        """
+        if target_subsystem in self.subsystems:
+            return True
+
+        print(f"Platforms supported: {list(self.subsystems.keys())}")
+        return False
+
+
+    def mps3_mappings(self) -> dict:
+        """
+        Returns the FPGA <--> MCC address translations
+        as a dict
+        """
+        if self.is_supported(self.name):
+            return self.subsystems[self.name]['mmap_mcc']
+        return {}
+
+
+    def mps3_bin_names(self) -> dict:
+        """
+        Returns expected binary names for the executable built
+        for Cortex-M55 or Cortex-M55+Ethos-U55 targets in the
+        form of a dict with index and name
+        """
+        if self.is_supported(self.name):
+            return self.subsystems[self.name]['bin_names']
+
+        return {}
+
+
+def main(args):
+    """
+    Generates the output txt file with the MCC to FPGA address mapping that
+    is used by the MCC on the FPGA to load executable regions into the
+    correct regions in memory.
+    """
+    # List out arguments used:
+    scatter_file_path = args.scatter_file_path
+    target_subsystem_name = args.target_subsystem
+    output_file_path = args.output_file_path
+
+    target = TargetSubsystem(target_subsystem_name=target_subsystem_name)
+
+    if not target.is_supported(target_subsystem_name):
+        print(f'Target {target_subsystem_name} not supported.')
+        return
+
+    with open(scatter_file_path,'r') as scatter_file:
+        lines_read = scatter_file.readlines()
+        str_list = []
+
+        mem_map = target.mps3_mappings()
+        bin_names = target.mps3_bin_names()
+
+        str_list.append("TITLE: Arm MPS3 FPGA prototyping board Images Configuration File\n")
+        str_list.append("[IMAGES]\n\n")
+
+        cnt = 0
+        for line in lines_read:
+            if is_commented(line) or not is_load_rom(line):
+                continue
+
+            addr = line.split()[1]
+
+            if addr not in mem_map:
+                raise RuntimeError(
+                    f'Translation for address {addr} unavailable')
+            if cnt >= len(bin_names):
+                raise RuntimeError(
+                    f"bin names len exceeded: {cnt}")
+
+            str_list.append("IMAGE" + str(cnt) + "ADDRESS: " +
+                mem_map[addr] + " ; MCC@" + mem_map[addr] +
+                " <=> FPGA@"  + addr + "\n")
+            str_list.append("IMAGE" + str(cnt) + "UPDATE: AUTO\n")
+            str_list.append("IMAGE" + str(cnt) + "FILE: \SOFTWARE\\" +
+                bin_names[cnt] + "\n\n")
+            cnt += 1
+
+        if 0 < cnt <= 32:
+            str_list.insert(2,
+                f"TOTALIMAGES: {cnt} ;Number of Images (Max: 32)\n\n")
+        else:
+            raise RuntimeError('Invalid image count')
+
+        with open(output_file_path, "w") as output_file:
+            output_file.write(''.join(str_list))
+
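+# Illustrative example (the scatter line is an assumption): for the sse-300
+# subsystem, a region such as "LOAD_ROM_1 0x00000000 0x00080000" would yield:
+#   IMAGE0ADDRESS: 0x00000000 ; MCC@0x00000000 <=> FPGA@0x00000000
+#   IMAGE0UPDATE: AUTO
+#   IMAGE0FILE: \SOFTWARE\itcm.bin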
+
+if __name__ == "__main__":
+    parser = ArgumentParser()
+    parser.add_argument("--scatter_file_path", type=str, required=True,
+                        help="Path to the scatter file")
+    parser.add_argument("--target_subsystem", type=str, required=True,
+                        help="Target subsystem in use")
+    parser.add_argument("--output_file_path", type=str, required=True,
+                        help="Output file path")
+    args = parser.parse_args()
+    main(args)
diff --git a/scripts/py/gen_labels_cpp.py b/scripts/py/gen_labels_cpp.py
new file mode 100644
index 0000000..1be9c63
--- /dev/null
+++ b/scripts/py/gen_labels_cpp.py
@@ -0,0 +1,81 @@
+#!env/bin/python3
+
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""
+Utility script to convert a given text file with labels (annotations for an
+NN model output vector) into a vector list initialiser. The intention is for
+this script to be called as part of the build framework to auto-generate the
+cpp file with labels that can be used in the application without modification.
+"""
+import datetime
+import os
+from argparse import ArgumentParser
+from jinja2 import Environment, FileSystemLoader
+
+parser = ArgumentParser()
+
+# Label file path
+parser.add_argument("--labels_file", type=str, help="Path to the label text file", required=True)
+# Output file to be generated
+parser.add_argument("--source_folder_path", type=str, help="path to source folder to be generated.", required=True)
+parser.add_argument("--header_folder_path", type=str, help="path to header folder to be generated.", required=True)
+parser.add_argument("--output_file_name", type=str, help="Required output file name", required=True)
+# Namespaces
+parser.add_argument("--namespaces", action='append', default=[])
+# License template
+parser.add_argument("--license_template", type=str, help="Header template file",
+                    default="header_template.txt")
+
+args = parser.parse_args()
+
+env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
+                  trim_blocks=True,
+                  lstrip_blocks=True)
+
+
+def main(args):
+    # Get the labels from text file
+    with open(args.labels_file, "r") as f:
+        labels = f.read().splitlines()
+
+    # No labels?
+    if len(labels) == 0:
+        raise Exception(f"no labels found in {args.labels_file}")
+
+    header_template = env.get_template(args.license_template)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 file_name=os.path.basename(args.labels_file),
+                                 year=datetime.datetime.now().year)
+
+    hpp_filename = os.path.join(args.header_folder_path, args.output_file_name + ".hpp")
+    env.get_template('Labels.hpp.template').stream(common_template_header=hdr,
+                                                   filename=(args.output_file_name).upper(),
+                                                   namespaces=args.namespaces) \
+        .dump(str(hpp_filename))
+
+    cc_filename = os.path.join(args.source_folder_path, args.output_file_name + ".cc")
+    env.get_template('Labels.cc.template').stream(common_template_header=hdr,
+                                                  labels=labels,
+                                                  labelsSize=len(labels),
+                                                  namespaces=args.namespaces) \
+        .dump(str(cc_filename))
+
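+# Illustrative example (file contents are assumptions): a labels file holding
+# the lines "cat" and "dog", with --output_file_name Labels --namespaces arm,
+# would render a Labels.cc containing:
+#   static const char * labelsVec[] LABELS_ATTRIBUTE = {
+#       "cat",
+#       "dog",
+#   };
+# wrapped in "namespace arm { ... }".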
+
+if __name__ == '__main__':
+    main(args)
diff --git a/scripts/py/gen_model_cpp.py b/scripts/py/gen_model_cpp.py
new file mode 100644
index 0000000..4843668
--- /dev/null
+++ b/scripts/py/gen_model_cpp.py
@@ -0,0 +1,97 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""
+Utility script to generate model c file that can be included in the
+project directly. This should be called as part of cmake framework
+should the models need to be generated at configuration stage.
+"""
+import datetime
+import os
+from argparse import ArgumentParser
+from pathlib import Path
+from jinja2 import Environment, FileSystemLoader
+
+parser = ArgumentParser()
+
+parser.add_argument("--tflite_path", help="Model (.tflite) path", required=True)
+parser.add_argument("--output_dir", help="Output directory", required=True)
+parser.add_argument('-e', '--expression', action='append', default=[], dest="expr")
+parser.add_argument('--header', action='append', default=[], dest="headers")
+parser.add_argument('-ns', '--namespaces', action='append', default=[], dest="namespaces")
+parser.add_argument("--license_template", type=str, help="Header template file",
+                    default="header_template.txt")
+args = parser.parse_args()
+
+env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
+                  trim_blocks=True,
+                  lstrip_blocks=True)
+
+
+def write_tflite_data(tflite_path):
+    # Extract array elements
+
+    hex_bytes = model_hex_bytes(tflite_path)
+    line = '{\n'
+    i = 1
+    while True:
+        try:
+            el = next(hex_bytes)
+            line = line + el + ', '
+            if i % 20 == 0:
+                yield line
+                line = ''
+            i += 1
+        except StopIteration:
+            line = line[:-2] + '};\n'
+            yield line
+            break
+
+
+def model_hex_bytes(tflite_path):
+    with open(tflite_path, 'rb') as tflite_model:
+        byte = tflite_model.read(1)
+        while byte != b"":
+            yield f'0x{byte.hex()}'
+            byte = tflite_model.read(1)
+
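+# Minimal sketch of the two generators above (illustrative; the throwaway
+# model file is an assumption):
+#   with open('tiny.tflite', 'wb') as f:
+#       f.write(b'\x01\x02\x03')
+#   print(''.join(write_tflite_data('tiny.tflite')))
+# This prints "{\n0x01, 0x02, 0x03};" - one array row per 20 bytes for
+# larger models.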
+
+def main(args):
+    if not os.path.isfile(args.tflite_path):
+        raise Exception(f"{args.tflite_path} not found")
+
+    # Cpp filename:
+    cpp_filename = Path(os.path.join(args.output_dir, os.path.basename(args.tflite_path) + ".cc")).absolute()
+    print(f"++ Converting {os.path.basename(args.tflite_path)} to\
+    {os.path.basename(cpp_filename)}")
+
+    os.makedirs(cpp_filename.parent, exist_ok=True)
+
+    header_template = env.get_template(args.license_template)
+
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 file_name=os.path.basename(args.tflite_path),
+                                 gen_time=datetime.datetime.now(),
+                                 year=datetime.datetime.now().year)
+
+    env.get_template('tflite.cc.template').stream(common_template_header=hdr,
+                                                  model_data=write_tflite_data(args.tflite_path),
+                                                  expressions=args.expr,
+                                                  additional_headers=args.headers,
+                                                  namespaces=args.namespaces).dump(str(cpp_filename))
+
+
+if __name__ == '__main__':
+    main(args)
diff --git a/scripts/py/gen_rgb_cpp.py b/scripts/py/gen_rgb_cpp.py
new file mode 100644
index 0000000..1a2e09b
--- /dev/null
+++ b/scripts/py/gen_rgb_cpp.py
@@ -0,0 +1,135 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""
+Utility script to convert a set of RGB images in a given location into
+corresponding cpp files and a single hpp file referencing the vectors
+from the cpp files.
+"""
+import datetime
+import glob
+import math
+import os
+import numpy as np
+
+from argparse import ArgumentParser
+from PIL import Image, UnidentifiedImageError
+from jinja2 import Environment, FileSystemLoader
+
+parser = ArgumentParser()
+parser.add_argument("--image_path", type=str, help="path to images folder or image file  to convert.")
+parser.add_argument("--source_folder_path", type=str, help="path to source folder to be generated.")
+parser.add_argument("--header_folder_path", type=str, help="path to header folder to be generated.")
+parser.add_argument("--image_size", type=int, nargs=2, help="Size (width and height) of the converted images.")
+parser.add_argument("--license_template", type=str, help="Header template file",
+                    default="header_template.txt")
+args = parser.parse_args()
+
+env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
+                  trim_blocks=True,
+                  lstrip_blocks=True)
+
+
+def write_hpp_file(header_file_path, cc_file_path, header_template_file, num_images, image_filenames,
+                   image_array_names, image_size):
+    print(f"++ Generating {header_file_path}")
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 year=datetime.datetime.now().year)
+    env.get_template('Images.hpp.template').stream(common_template_header=hdr,
+                                                   imgs_count=num_images,
+                                                   img_size=str(image_size[0] * image_size[1] * 3),
+                                                   var_names=image_array_names) \
+        .dump(str(header_file_path))
+
+    env.get_template('Images.cc.template').stream(common_template_header=hdr,
+                                                  var_names=image_array_names,
+                                                  img_names=image_filenames) \
+        .dump(str(cc_file_path))
+
+
+def write_individual_img_cc_file(image_filename, cc_filename, header_template_file, original_image,
+                                 image_size, array_name):
+    print(f"++ Converting {image_filename} to {os.path.basename(cc_filename)}")
+
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 file_name=os.path.basename(image_filename),
+                                 year=datetime.datetime.now().year)
+
+    original_image.thumbnail(image_size)
+    delta_w = abs(image_size[0] - original_image.size[0])
+    delta_h = abs(image_size[1] - original_image.size[1])
+    resized_image = Image.new('RGB', image_size, (255, 255, 255))
+    resized_image.paste(original_image, (int(delta_w / 2), int(delta_h / 2)))
+
+    # Convert the image and write it to the cc file
+    rgb_data = np.array(resized_image, dtype=np.uint8).flatten()
+    hex_line_generator = (', '.join(map(hex, sub_arr))
+                          for sub_arr in np.array_split(rgb_data, math.ceil(len(rgb_data) / 20)))
+    env.get_template('image.cc.template').stream(common_template_header=hdr,
+                                                 var_name=array_name,
+                                                 img_data=hex_line_generator) \
+        .dump(str(cc_filename))
+
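+# Note on the resize above: thumbnail() preserves the aspect ratio, so the
+# image is letterboxed onto a white canvas rather than stretched. E.g. a
+# 200x100 source with --image_size 100 100 shrinks to 100x50 and is pasted
+# at (0, 25), centring it vertically.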
+
+def main(args):
+    # Keep the count of the images converted
+    image_idx = 0
+    image_filenames = []
+    image_array_names = []
+
+    if os.path.isdir(args.image_path):
+        filepaths = sorted(glob.glob(os.path.join(args.image_path, '**/*.*'), recursive=True))
+    elif os.path.isfile(args.image_path):
+        filepaths = [args.image_path]
+    else:
+        raise OSError("Directory or file does not exist.")
+
+    for filepath in filepaths:
+        filename = os.path.basename(filepath)
+
+        try:
+            original_image = Image.open(filepath).convert("RGB")
+        except UnidentifiedImageError:
+            print(f"-- Skipping file {filepath} due to unsupported image format.")
+            continue
+
+        image_filenames.append(filename)
+
+        # Save the cc file
+        cc_filename = os.path.join(args.source_folder_path,
+                                   (filename.rsplit(".")[0]).replace(" ", "_") + ".cc")
+        array_name = "im" + str(image_idx)
+        image_array_names.append(array_name)
+        write_individual_img_cc_file(filename, cc_filename, args.license_template,
+                                     original_image, args.image_size, array_name)
+
+        # Increment image index
+        image_idx = image_idx + 1
+
+    header_filename = "InputFiles.hpp"
+    header_filepath = os.path.join(args.header_folder_path, header_filename)
+    common_cc_filename = "InputFiles.cc"
+    common_cc_filepath = os.path.join(args.source_folder_path, common_cc_filename)
+    write_hpp_file(header_filepath, common_cc_filepath, args.license_template,
+                   image_idx, image_filenames, image_array_names, args.image_size)
+
+
+if __name__ == '__main__':
+    main(args)
diff --git a/scripts/py/gen_test_data_cpp.py b/scripts/py/gen_test_data_cpp.py
new file mode 100644
index 0000000..7cc5f11
--- /dev/null
+++ b/scripts/py/gen_test_data_cpp.py
@@ -0,0 +1,162 @@
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+"""
+Utility script to convert a set of pairs of npy files in a given location into
+corresponding cpp files and a single hpp file referencing the vectors
+from the cpp files.
+"""
+import datetime
+import math
+import os
+import numpy as np
+
+from argparse import ArgumentParser
+from jinja2 import Environment, FileSystemLoader
+
+parser = ArgumentParser()
+parser.add_argument("--data_folder_path", type=str, help="path to ifm-ofm npy folder to convert.")
+parser.add_argument("--source_folder_path", type=str, help="path to source folder to be generated.")
+parser.add_argument("--header_folder_path", type=str, help="path to header folder to be generated.")
+parser.add_argument("--usecase", type=str, default="", help="Test data file suffix.")
+parser.add_argument("--namespaces", action='append', default=[])
+parser.add_argument("--license_template", type=str, help="Header template file",
+                    default="header_template.txt")
+parser.add_argument("-v", "--verbosity", action="store_true")
+
+args = parser.parse_args()
+
+env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
+                  trim_blocks=True,
+                  lstrip_blocks=True)
+
+
+def write_hpp_file(header_filename, cc_file_path, header_template_file, num_iofms,
+                   ifm_array_names, ifm_size, ofm_array_names, ofm_size, iofm_data_type):
+    header_file_path = os.path.join(args.header_folder_path, header_filename)
+
+    print(f"++ Generating {header_file_path}")
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 year=datetime.datetime.now().year)
+    env.get_template('TestData.hpp.template').stream(common_template_header=hdr,
+                                                   fm_count=num_iofms,
+                                                   ifm_var_names=ifm_array_names,
+                                                   ifm_var_size=ifm_size,
+                                                   ofm_var_names=ofm_array_names,
+                                                   ofm_var_size=ofm_size,
+                                                   data_type=iofm_data_type,
+                                                   namespaces=args.namespaces) \
+        .dump(str(header_file_path))
+
+    env.get_template('TestData.cc.template').stream(common_template_header=hdr,
+                                                  include_h=header_filename,
+                                                  ifm_var_names=ifm_array_names,
+                                                  ofm_var_names=ofm_array_names,
+                                                  data_type=iofm_data_type,
+                                                  namespaces=args.namespaces) \
+        .dump(str(cc_file_path))
+
+
+def write_individual_cc_file(filename, cc_filename, header_filename, header_template_file, array_name, iofm_data_type):
+    print(f"++ Converting {filename} to {os.path.basename(cc_filename)}")
+    header_template = env.get_template(header_template_file)
+    hdr = header_template.render(script_name=os.path.basename(__file__),
+                                 gen_time=datetime.datetime.now(),
+                                 file_name=os.path.basename(filename),
+                                 year=datetime.datetime.now().year)
+
+    # Load the npy data and write it to the cc file
+    fm_data = (np.load(os.path.join(args.data_folder_path, filename))).flatten()
+    hex_line_generator = (', '.join(map(hex, sub_arr))
+                          for sub_arr in np.array_split(fm_data, math.ceil(len(fm_data) / 20)))
+
+    env.get_template('testdata.cc.template').stream(common_template_header=hdr,
+                                                 include_h=header_filename,
+                                                 var_name=array_name,
+                                                 fm_data=hex_line_generator,
+                                                 data_type=iofm_data_type,
+                                                 namespaces=args.namespaces) \
+        .dump(str(cc_filename))
+
+
+def get_npy_vec_size(filename: str) -> int:
+    """
+    Gets the size of the array in the npy file
+    Args:
+        filename: npy file path.
+    Return:
+        size in bytes
+    """
+    data = np.load(os.path.join(args.data_folder_path, filename))
+    return (data.size * data.dtype.itemsize)
+
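+# Illustrative example (folder contents are assumptions): a data folder with
+# ifm0.npy and ofm0.npy plus --usecase kws would generate ifm0_kws.cc,
+# ofm0_kws.cc and the shared TestData_kws.hpp/TestData_kws.cc pair.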
+
+def main(args):
+    # Keep the count of the images converted
+    ifm_array_names = []
+    ofm_array_names = []
+
+    add_usecase_fname = ("_" + args.usecase) if (args.usecase is not "") else ""
+    header_filename = "TestData" + add_usecase_fname + ".hpp"
+    common_cc_filename = "TestData" + add_usecase_fname + ".cc"
+
+    # In the data_folder_path there should be pairs of ifm-ofm npy files.
+    # The assumed naming convention is: ifm0.npy-ofm0.npy, ifm1.npy-ofm1.npy
+    i_ofms_count = int(len([name for name in os.listdir(args.data_folder_path)
+                            if name.lower().endswith('.npy')]) / 2)
+
+    iofm_data_type = "int8_t"
+    if (i_ofms_count > 0):
+        iofm_data_type = "int8_t" if (np.load(os.path.join(args.data_folder_path, "ifm0.npy")).dtype == np.int8) else "uint8_t"
+
+    ifm_size = -1
+    ofm_size = -1
+
+    for idx in range(i_ofms_count):
+        # Save the fm cc file
+        base_name = "ifm" + str(idx)
+        filename = base_name+".npy"
+        array_name = base_name + add_usecase_fname
+        cc_filename = os.path.join(args.source_folder_path, array_name + ".cc")
+        ifm_array_names.append(array_name)
+        write_individual_cc_file(filename, cc_filename, header_filename, args.license_template, array_name, iofm_data_type)
+        if ifm_size == -1:
+            ifm_size = get_npy_vec_size(filename)
+        elif ifm_size != get_npy_vec_size(filename):
+            raise Exeception(f"ifm size changed for index {idx}")
+
+        # Save the fm cc file
+        base_name = "ofm" + str(idx)
+        filename = base_name+".npy"
+        array_name = base_name + add_usecase_fname
+        cc_filename = os.path.join(args.source_folder_path, array_name + ".cc")
+        ofm_array_names.append(array_name)
+        write_individual_cc_file(filename, cc_filename, header_filename, args.license_template, array_name, iofm_data_type)
+        if ofm_size == -1:
+            ofm_size = get_npy_vec_size(filename)
+        elif ofm_size != get_npy_vec_size(filename):
+            raise Exeception(f"ofm size changed for index {idx}")
+
+    common_cc_filepath = os.path.join(args.source_folder_path, common_cc_filename)
+    write_hpp_file(header_filename, common_cc_filepath, args.license_template,
+                   i_ofms_count, ifm_array_names, ifm_size, ofm_array_names, ofm_size, iofm_data_type)
+
+
+if __name__ == '__main__':
+    if args.verbosity:
+        print("Running gen_test_data_cpp with args: "+str(args))
+    main(args)
diff --git a/scripts/py/gen_utils.py b/scripts/py/gen_utils.py
new file mode 100644
index 0000000..4a56646
--- /dev/null
+++ b/scripts/py/gen_utils.py
@@ -0,0 +1,115 @@
+#!env/bin/python3
+
+#  Copyright (c) 2021 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import soundfile as sf
+import resampy
+import numpy as np
+
+
+class AudioUtils:
+    @staticmethod
+    def res_data_type(res_type_value):
+        """
+        Returns the input string if it is one of the valid resample types
+        """
+        import argparse
+        if res_type_value not in AudioUtils.res_type_list():
+            raise argparse.ArgumentTypeError(f"{res_type_value} not valid. Supported only {AudioUtils.res_type_list()}")
+        return res_type_value
+
+    @staticmethod
+    def res_type_list():
+        """
+        Returns the resample type list
+        """
+        return ['kaiser_best', 'kaiser_fast']
+
+    @staticmethod
+    def load_resample_audio_clip(path, target_sr=16000, mono=True, offset=0.0, duration=0, res_type='kaiser_best',
+                                 min_len=16000):
+        """
+        Load and resample an audio clip with the given desired specs.
+
+        Parameters:
+        ----------
+        path (string):             Path to the input audio clip.
+        target_sr (int, optional): Target sampling rate. Positive numbers are considered valid;
+                                    if zero or negative, the native sampling rate of the file is preserved. Default is 16000.
+        mono (bool, optional):     Specify if the audio file needs to be converted to mono. Default is True.
+        offset (float, optional):  Start reading after this time (in seconds). Default is 0.0.
+        duration (int, optional):  Target duration. Positive numbers are considered valid;
+                                    if zero or negative, the duration of the file is preserved. Default is 0.
+        res_type (str, optional):  Resample type to use. Default is 'kaiser_best'.
+        min_len (int, optional):   Minimum length of the output audio time series. Default is 16000.
+
+        Returns:
+        ----------
+        y (np.ndarray): Output audio time series of shape (n,) or (2, n).
+        sr (int):       A scalar number > 0 representing the sampling rate of `y`.
+        """
+        try:
+            with sf.SoundFile(path) as audio_file:
+                origin_sr = audio_file.samplerate
+
+                if offset:
+                    # Seek to the start of the target read
+                    audio_file.seek(int(offset * origin_sr))
+
+                if duration > 0:
+                    num_frame_duration = int(duration * origin_sr)
+                else:
+                    num_frame_duration = -1
+
+                # Load the target number of frames
+                y = audio_file.read(frames=num_frame_duration, dtype=np.float32, always_2d=False).T
+
+        except Exception as err:
+            raise RuntimeError(f"Failed to open {path} as an audio file.") from err
+
+        # Convert to mono if requested and if audio has more than one dimension
+        if mono and (y.ndim > 1):
+            y = np.mean(y, axis=0)
+
+        if origin_sr != target_sr and target_sr > 0:
+            ratio = float(target_sr) / origin_sr
+            axis = -1
+            n_samples = int(np.ceil(y.shape[axis] * ratio))
+
+            # Resample using resampy
+            y_rs = resampy.resample(y, origin_sr, target_sr, filter=res_type, axis=axis)
+            n_rs_samples = y_rs.shape[axis]
+
+            # Adjust the size
+            if n_rs_samples > n_samples:
+                slices = [slice(None)] * y_rs.ndim
+                slices[axis] = slice(0, n_samples)
+                y = y_rs[tuple(slices)]
+            elif n_rs_samples < n_samples:
+                lengths = [(0, 0)] * y_rs.ndim
+                lengths[axis] = (0, n_samples - n_rs_samples)
+                y = np.pad(y_rs, lengths, 'constant', constant_values=(0))
+
+            sr = target_sr
+        else:
+            sr = origin_sr
+
+        # Pad if necessary when a minimum length is set (min_len > 0)
+        if (y.shape[0] < min_len) and (min_len > 0):
+            sample_to_pad = min_len - y.shape[0]
+            y = np.pad(y, (0, sample_to_pad), 'constant', constant_values=(0))
+
+        return y, sr
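+
+# Minimal usage sketch (the clip path is an assumption). With the default
+# mono=True the returned series is 1-D and, thanks to the final padding step,
+# always holds at least min_len samples:
+#   y, sr = AudioUtils.load_resample_audio_clip("clip.wav", target_sr=16000)
+#   assert sr == 16000 and y.shape[0] >= 16000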
diff --git a/scripts/py/requirements.txt b/scripts/py/requirements.txt
new file mode 100644
index 0000000..6330f58
--- /dev/null
+++ b/scripts/py/requirements.txt
@@ -0,0 +1,12 @@
+cffi==1.14.2
+Jinja2==2.11.2
+llvmlite==0.33.0
+MarkupSafe==1.1.1
+numba==0.50.1
+numpy==1.17.4
+Pillow==7.0.0
+pycparser==2.20
+resampy==0.2.2
+scipy==1.5.2
+six==1.15.0
+SoundFile==0.10.3.post1
diff --git a/scripts/py/templates/AudioClips.cc.template b/scripts/py/templates/AudioClips.cc.template
new file mode 100644
index 0000000..edf46bc
--- /dev/null
+++ b/scripts/py/templates/AudioClips.cc.template
@@ -0,0 +1,62 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "InputFiles.hpp"
+
+static const char *audio_clip_filenames[] = {
+{% for name in clip_names %}
+    "{{name}}",
+{% endfor %}
+};
+
+static const int16_t *audio_clip_arrays[] = {
+    {{ var_names|join(',\n\t') }}
+};
+
+
+static const size_t audio_clip_sizes[NUMBER_OF_FILES] = {
+    {{ clip_sizes|join(',\n\t') }}
+};
+
+
+const char* get_filename(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return audio_clip_filenames[idx];
+    }
+    return nullptr;
+}
+
+
+const int16_t* get_audio_array(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return audio_clip_arrays[idx];
+    }
+    return nullptr;
+}
+
+
+uint32_t get_audio_array_size(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return audio_clip_sizes[idx];
+    }
+    return 0;
+}
+
diff --git a/scripts/py/templates/AudioClips.hpp.template b/scripts/py/templates/AudioClips.hpp.template
new file mode 100644
index 0000000..eb0beda
--- /dev/null
+++ b/scripts/py/templates/AudioClips.hpp.template
@@ -0,0 +1,34 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#ifndef GENERATED_AUDIOCLIPS_H
+#define GENERATED_AUDIOCLIPS_H
+
+#include <cstdint>
+#include <stddef.h>
+
+#define NUMBER_OF_FILES  ({{clips_count}}U)
+{% for var_name, size in varname_size %}
+extern const int16_t {{var_name}}[{{size}}];
+{% endfor %}
+
+const char* get_filename(const uint32_t idx);
+const int16_t* get_audio_array(const uint32_t idx);
+uint32_t get_audio_array_size(const uint32_t idx);
+
+#endif /* GENERATED_AUDIOCLIPS_H */
diff --git a/scripts/py/templates/Images.cc.template b/scripts/py/templates/Images.cc.template
new file mode 100644
index 0000000..6e86f98
--- /dev/null
+++ b/scripts/py/templates/Images.cc.template
@@ -0,0 +1,47 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "InputFiles.hpp"
+
+static const char *img_filenames[] = {
+{% for name in img_names %}
+    "{{name}}",
+{% endfor %}
+};
+
+static const uint8_t *img_arrays[] = {
+    {{ var_names|join(',\n\t') }}
+};
+
+const char* get_filename(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return img_filenames[idx];
+    }
+    return nullptr;
+}
+
+
+const uint8_t* get_img_array(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FILES) {
+        return img_arrays[idx];
+    }
+    return nullptr;
+}
+
diff --git a/scripts/py/templates/Images.hpp.template b/scripts/py/templates/Images.hpp.template
new file mode 100644
index 0000000..89ce39e
--- /dev/null
+++ b/scripts/py/templates/Images.hpp.template
@@ -0,0 +1,34 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#ifndef GENERATED_IMAGES_H
+#define GENERATED_IMAGES_H
+
+#include <cstdint>
+
+#define NUMBER_OF_FILES  ({{imgs_count}}U)
+#define IMAGE_DATA_SIZE  ({{img_size}}U)
+
+{% for var_name in var_names %}
+extern const uint8_t {{var_name}}[IMAGE_DATA_SIZE];
+{% endfor %}
+
+const char* get_filename(const uint32_t idx);
+const uint8_t* get_img_array(const uint32_t idx);
+
+#endif /* GENERATED_IMAGES_H */
diff --git a/scripts/py/templates/Labels.cc.template b/scripts/py/templates/Labels.cc.template
new file mode 100644
index 0000000..f1ec1b5
--- /dev/null
+++ b/scripts/py/templates/Labels.cc.template
@@ -0,0 +1,54 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "BufAttributes.hpp"
+
+#include <vector>
+#include <string>
+
+{% for namespace in namespaces %}
+namespace {{namespace}} {
+{% endfor %}
+
+static const char * labelsVec[] LABELS_ATTRIBUTE = {
+{% for label in labels %}
+    "{{label}}",
+{% endfor %}
+};
+
+bool GetLabelsVector(std::vector<std::string>& labels)
+{
+    constexpr size_t labelsSz = {{labelsSize}};
+    labels.clear();
+
+    if (!labelsSz) {
+        return false;
+    }
+
+    labels.reserve(labelsSz);
+
+    for (size_t i = 0; i < labelsSz; ++i) {
+        labels.emplace_back(labelsVec[i]);
+    }
+
+    return true;
+}
+
+{% for namespace in namespaces %}
+} /* namespace {{namespace}} */
+{% endfor %}
diff --git a/scripts/py/templates/Labels.hpp.template b/scripts/py/templates/Labels.hpp.template
new file mode 100644
index 0000000..c16a983
--- /dev/null
+++ b/scripts/py/templates/Labels.hpp.template
@@ -0,0 +1,41 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#ifndef {{filename}}_HPP
+#define {{filename}}_HPP
+
+#include <string>
+#include <vector>
+
+{% for namespace in namespaces %}
+namespace {{namespace}} {
+{% endfor %}
+
+/**
+ * @brief       Gets the label vector corresponding to the model
+ * @param[out]  labels   Vector of strings.
+ * @return      true if successful, false otherwise.
+ */
+extern bool GetLabelsVector(std::vector<std::string>& labels);
+
+
+{% for namespace in namespaces %}
+} /* namespace {{namespace}} */
+{% endfor %}
+
+#endif /* {{filename}}_HPP */
diff --git a/scripts/py/templates/TestData.cc.template b/scripts/py/templates/TestData.cc.template
new file mode 100644
index 0000000..1acd14d
--- /dev/null
+++ b/scripts/py/templates/TestData.cc.template
@@ -0,0 +1,51 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "{{include_h}}"
+
+{% for namespace in namespaces %}
+namespace {{namespace}} {
+{% endfor %}
+
+static const {{data_type}} *ifm_arrays[] = {
+    {{ ifm_var_names|join(',\n\t') }}
+};
+
+static const {{data_type}} *ofm_arrays[] = {
+    {{ ofm_var_names|join(',\n\t') }}
+};
+
+const {{data_type}}* get_ifm_data_array(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FM_FILES) {
+        return ifm_arrays[idx];
+    }
+    return nullptr;
+}
+
+const {{data_type}}* get_ofm_data_array(const uint32_t idx)
+{
+    if (idx < NUMBER_OF_FM_FILES) {
+        return ofm_arrays[idx];
+    }
+    return nullptr;
+}
+
+{% for namespace in namespaces %}
+} /* namespace {{namespace}} */
+{% endfor %}
diff --git a/scripts/py/templates/TestData.hpp.template b/scripts/py/templates/TestData.hpp.template
new file mode 100644
index 0000000..cdedd48
--- /dev/null
+++ b/scripts/py/templates/TestData.hpp.template
@@ -0,0 +1,47 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#ifndef GENERATED_TEST_DATA_H
+#define GENERATED_TEST_DATA_H
+
+#include <cstdint>
+
+{% for namespace in namespaces %}
+namespace {{namespace}} {
+{% endfor %}
+
+#define NUMBER_OF_FM_FILES  ({{fm_count}}U)
+#define IFM_DATA_SIZE  ({{ifm_var_size}}U)
+#define OFM_DATA_SIZE  ({{ofm_var_size}}U)
+
+{% for ifm_var_name in ifm_var_names %}
+extern const {{data_type}} {{ifm_var_name}}[IFM_DATA_SIZE];
+{% endfor %}
+
+{% for ofm_var_name in ofm_var_names %}
+extern const {{data_type}} {{ofm_var_name}}[OFM_DATA_SIZE];
+{% endfor %}
+
+const {{data_type}}* get_ifm_data_array(const uint32_t idx);
+const {{data_type}}* get_ofm_data_array(const uint32_t idx);
+
+{% for namespace in namespaces %}
+} /* namespace {{namespace}} */
+{% endfor %}
+
+#endif /* GENERATED_TEST_DATA_H */
diff --git a/scripts/py/templates/audio.cc.template b/scripts/py/templates/audio.cc.template
new file mode 100644
index 0000000..f1e29ef
--- /dev/null
+++ b/scripts/py/templates/audio.cc.template
@@ -0,0 +1,25 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#include "InputFiles.hpp"
+#include "BufAttributes.hpp"
+#include <cstdint>
+
+const int16_t {{var_name}} [{{size}}] IFM_BUF_ATTRIBUTE = {
+    {{audio_data|join(',\n\t')}}
+};
\ No newline at end of file
diff --git a/scripts/py/templates/default.hpp.template b/scripts/py/templates/default.hpp.template
new file mode 100644
index 0000000..acba891
--- /dev/null
+++ b/scripts/py/templates/default.hpp.template
@@ -0,0 +1,28 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+#}
+{{common_template_header}}
+
+#ifndef DEFAULT_GENERATED_INPUT_H
+#define DEFAULT_GENERATED_INPUT_H
+
+#include <cstdint>
+
+#define NUMBER_OF_FILES  (0U)
+
+const char* get_filename(const uint32_t idx);
+
+#endif /* DEFAULT_GENERATED_INPUT_H */
diff --git a/scripts/py/templates/header_template.txt b/scripts/py/templates/header_template.txt
new file mode 100644
index 0000000..0dac4be
--- /dev/null
+++ b/scripts/py/templates/header_template.txt
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) {{year}}, Arm Limited and affiliates.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*********************    Autogenerated file. DO NOT EDIT *******************
+ * Generated from {{script_name}} tool {% if file_name %}and {{file_name}}{% endif %} file.
+ * Date: {{gen_time}}
+ ***************************************************************************/
diff --git a/scripts/py/templates/image.cc.template b/scripts/py/templates/image.cc.template
new file mode 100644
index 0000000..010daa1
--- /dev/null
+++ b/scripts/py/templates/image.cc.template
@@ -0,0 +1,25 @@
+{#
+ Copyright (c) 2021 Arm Limited. All rights reserved.
+ SPDX-License-Identifier: Apache-2.0
+
+ Licensed under the Apache License, V