MLECO-1252 ASR sample application using the public ArmNN C++ API.

Change-Id: I98cd505b8772a8c8fa88308121bc94135bb45068
Signed-off-by: Éanna Ó Catháin <eanna.ocathain@arm.com>
diff --git a/samples/ObjectDetection/CMakeLists.txt b/samples/ObjectDetection/CMakeLists.txt
index 9e85fab..7e587f7 100644
--- a/samples/ObjectDetection/CMakeLists.txt
+++ b/samples/ObjectDetection/CMakeLists.txt
@@ -38,12 +38,16 @@
     set(DEPENDENCIES_DIR ${CMAKE_BINARY_DIR}/dependencies)
 endif()
 
-include(cmake/find_opencv.cmake)
-include(cmake/find_armnn.cmake)
+include(../common/cmake/find_opencv.cmake)
+include(../common/cmake/find_armnn.cmake)
 
 include_directories(include)
+include_directories(../common/include/ArmnnUtils)
+include_directories(../common/include/Utils)
+include_directories(../common/include/CVUtils)
 
 file(GLOB SOURCES "src/*.cpp")
+file(GLOB COMMON_SOURCES "../common/src/**/*.cpp")
 list(REMOVE_ITEM SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/Main.cpp)
 file(GLOB TEST_SOURCES "test/*.cpp")
 file(GLOB APP_MAIN "src/Main.cpp")
@@ -55,7 +59,7 @@
 
 set(APP_TARGET_NAME "${CMAKE_PROJECT_NAME}")
 
-add_executable("${APP_TARGET_NAME}" ${SOURCES} ${APP_MAIN})
+add_executable("${APP_TARGET_NAME}" ${SOURCES} ${COMMON_SOURCES} ${APP_MAIN})
 
 if (NOT OPENCV_LIBS_FOUND)
     message("Building OpenCV libs")
diff --git a/samples/ObjectDetection/Readme.md b/samples/ObjectDetection/Readme.md
index bceaa4b..408917e 100644
--- a/samples/ObjectDetection/Readme.md
+++ b/samples/ObjectDetection/Readme.md
@@ -230,7 +230,6 @@
 * --preferred-backends: Takes the preferred backends in preference order, separated by comma.
                         For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc].
                         Defaults to CpuRef **[OPTIONAL]**
-* --help: Prints all the available options to screen
 
 ### Object Detection on a supplied video file
 
@@ -379,8 +378,8 @@
 Generic object detection pipeline has 3 steps to perform data pre-processing, run inference and decode inference results
 in the post-processing step.
 
-See [`ObjDetectionPipeline`](./include/NetworkPipeline.hpp) and implementations for [`MobileNetSSDv1`](./include/NetworkPipeline.hpp)
-and [`YoloV3Tiny`](./include/NetworkPipeline.hpp) for more details.
+See [`ObjDetectionPipeline`](include/ObjectDetectionPipeline.hpp) and implementations for [`MobileNetSSDv1`](include/ObjectDetectionPipeline.hpp)
+and [`YoloV3Tiny`](include/ObjectDetectionPipeline.hpp) for more details.
 
 #### Pre-processing the Captured Frame
 Each frame captured from source is read as an `cv::Mat` in BGR format but channels are swapped to RGB in a frame reader
diff --git a/samples/ObjectDetection/cmake/unit_tests.cmake b/samples/ObjectDetection/cmake/unit_tests.cmake
index dcfa512..1a8c466 100644
--- a/samples/ObjectDetection/cmake/unit_tests.cmake
+++ b/samples/ObjectDetection/cmake/unit_tests.cmake
@@ -7,7 +7,7 @@
 
 file(GLOB TEST_SOURCES "test/*")
 
-include(cmake/find_catch.cmake)
+include(../common/cmake/find_catch.cmake)
 
 file(DOWNLOAD "https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip"
         ${CMAKE_CURRENT_SOURCE_DIR}/test/resources/models.zip SHOW_PROGRESS)
@@ -43,7 +43,7 @@
         INSTALL_COMMAND ""
         )
 
-add_executable("${TEST_TARGET_NAME}" ${SOURCES} ${TEST_SOURCES})
+add_executable("${TEST_TARGET_NAME}" ${SOURCES} ${TEST_SOURCES} ${COMMON_SOURCES})
 
 add_dependencies(
     "${TEST_TARGET_NAME}"
@@ -60,6 +60,6 @@
 
 target_include_directories("${TEST_TARGET_NAME}" PUBLIC ${TEST_TPIP_INCLUDE}
     ${ARMNN_INCLUDE_DIR}
-    ${OPENCV_INCLUDE_DIR} ${DEPENDENCIES_DIR} ${TEST_RESOURCES_DIR})
+    ${OPENCV_INCLUDE_DIR} ${DEPENDENCIES_DIR} ${TEST_RESOURCES_DIR} ${COMMON_INCLUDE_DIR})
 
 target_link_libraries("${TEST_TARGET_NAME}" PUBLIC ${ARMNN_LIBS} ${OPENCV_LIBS} ${FFMPEG_LIBS})
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/ArmnnNetworkExecutor.hpp b/samples/ObjectDetection/include/ArmnnNetworkExecutor.hpp
deleted file mode 100644
index c75b68b..0000000
--- a/samples/ObjectDetection/include/ArmnnNetworkExecutor.hpp
+++ /dev/null
@@ -1,80 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "Types.hpp"
-
-#include "armnn/ArmNN.hpp"
-#include "armnnTfLiteParser/ITfLiteParser.hpp"
-#include "armnnUtils/DataLayoutIndexed.hpp"
-#include <armnn/Logging.hpp>
-
-#include <string>
-#include <vector>
-
-namespace od
-{
-/**
-* @brief Used to load in a network through ArmNN and run inference on it against a given backend.
-*
-*/
-class ArmnnNetworkExecutor
-{
-private:
-    armnn::IRuntimePtr m_Runtime;
-    armnn::NetworkId m_NetId{};
-    mutable InferenceResults m_OutputBuffer;
-    armnn::InputTensors     m_InputTensors;
-    armnn::OutputTensors    m_OutputTensors;
-    std::vector<armnnTfLiteParser::BindingPointInfo> m_outputBindingInfo;
-
-    std::vector<std::string> m_outputLayerNamesList;
-
-    armnnTfLiteParser::BindingPointInfo m_inputBindingInfo;
-
-    void PrepareTensors(const void* inputData, const size_t dataBytes);
-
-    template <typename Enumeration>
-    auto log_as_int(Enumeration value)
-    -> typename std::underlying_type<Enumeration>::type
-    {
-        return static_cast<typename std::underlying_type<Enumeration>::type>(value);
-    }
-
-public:
-    ArmnnNetworkExecutor() = delete;
-
-    /**
-    * @brief Initializes the network with the given input data. Parsed through TfLiteParser and optimized for a
-    *        given backend.
-    *
-    * Note that the output layers names order in m_outputLayerNamesList affects the order of the feature vectors
-    * in output of the Run method.
-    *
-    *       * @param[in] modelPath - Relative path to the model file
-    *       * @param[in] backends - The list of preferred backends to run inference on
-    */
-    ArmnnNetworkExecutor(std::string& modelPath,
-                         std::vector<armnn::BackendId>& backends);
-
-    /**
-    * @brief Returns the aspect ratio of the associated model in the order of width, height.
-    */
-    Size GetImageAspectRatio();
-
-    armnn::DataType GetInputDataType() const;
-
-    /**
-    * @brief Runs inference on the provided input data, and stores the results in the provided InferenceResults object.
-    *
-    * @param[in] inputData - input frame data
-    * @param[in] dataBytes - input data size in bytes
-    * @param[out] results - Vector of DetectionResult objects used to store the output result.
-    */
-    bool Run(const void* inputData, const size_t dataBytes, InferenceResults& outResults);
-
-};
-}// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/CmdArgsParser.hpp b/samples/ObjectDetection/include/CmdArgsParser.hpp
deleted file mode 100644
index 6c22e6f..0000000
--- a/samples/ObjectDetection/include/CmdArgsParser.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-#include <string>
-#include <map>
-#include <iostream>
-
-const std::string MODEL_NAME = "--model-name";
-const std::string VIDEO_FILE_PATH = "--video-file-path";
-const std::string MODEL_FILE_PATH = "--model-file-path";
-const std::string OUTPUT_VIDEO_FILE_PATH = "--output-video-file-path";
-const std::string LABEL_PATH = "--label-path";
-const std::string PREFERRED_BACKENDS = "--preferred-backends";
-const std::string HELP = "--help";
-
-/*
- * The accepted options for this Object detection executable
- */
-static std::map<std::string, std::string> CMD_OPTIONS = {
-        {VIDEO_FILE_PATH, "[REQUIRED] Path to the video file to run object detection on"},
-        {MODEL_FILE_PATH, "[REQUIRED] Path to the Object Detection model to use"},
-        {LABEL_PATH, "[REQUIRED] Path to the label set for the provided model file. "
-                     "Label file is should just be an ordered list, seperated by new line."},
-        {MODEL_NAME, "[REQUIRED] The name of the model being used. Accepted options: YOLO_V3_TINY, SSD_MOBILE"},
-        {OUTPUT_VIDEO_FILE_PATH, "[OPTIONAL] Path to the output video file with detections added in. "
-                                 "If specified will save file to disk, else displays the output to screen"},
-        {PREFERRED_BACKENDS, "[OPTIONAL] Takes the preferred backends in preference order, separated by comma."
-                             " For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]."
-                             " Defaults to CpuAcc,CpuRef"}
-};
-
-/*
- * Checks that a particular option was specified by the user
- */
-bool CheckOptionSpecified(const std::map<std::string, std::string>& options, const std::string& option);
-
-
-/*
- * Retrieves the user provided option
- */
-std::string GetSpecifiedOption(const std::map<std::string, std::string>& options, const std::string& option);
-
-
-/*
- * Parses all the command line options provided by the user and stores in a map.
- */
-int ParseOptions(std::map<std::string, std::string>& options, std::map<std::string, std::string>& acceptedOptions,
-                 char *argv[], int argc);
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/IDetectionResultDecoder.hpp b/samples/ObjectDetection/include/IDetectionResultDecoder.hpp
index c0a29df..a8a3cbb 100644
--- a/samples/ObjectDetection/include/IDetectionResultDecoder.hpp
+++ b/samples/ObjectDetection/include/IDetectionResultDecoder.hpp
@@ -30,9 +30,9 @@
     *
     * @return     Vector of decoded detected objects.
     */
-    virtual DetectedObjects Decode(const InferenceResults& results,
-                                   const Size& outputFrameSize,
-                                   const Size& resizedFrameSize,
+    virtual DetectedObjects Decode(const common::InferenceResults<float>& results,
+                                   const common::Size& outputFrameSize,
+                                   const common::Size& resizedFrameSize,
                                    const std::vector<std::string>& labels) = 0;
 
 };
diff --git a/samples/ObjectDetection/include/ImageUtils.hpp b/samples/ObjectDetection/include/ImageUtils.hpp
index 07e2b83..9bae568 100644
--- a/samples/ObjectDetection/include/ImageUtils.hpp
+++ b/samples/ObjectDetection/include/ImageUtils.hpp
@@ -21,7 +21,7 @@
 */
 void AddInferenceOutputToFrame(od::DetectedObjects& decodedResults,
                                cv::Mat& inputFrame,
-                               std::vector<std::tuple<std::string, od::BBoxColor>>& labels);
+                               std::vector<std::tuple<std::string, common::BBoxColor>>& labels);
 
 /**
 * @brief Function to resize a frame while keeping aspect ratio.
@@ -30,7 +30,7 @@
 * @param[out]  dest            the frame we want to resize into.
 * @param[in]  aspectRatio      aspect ratio to use when resizing.
 */
-void ResizeFrame(const cv::Mat& frame, cv::Mat& dest, const od::Size& aspectRatio);
+void ResizeFrame(const cv::Mat& frame, cv::Mat& dest, const common::Size& aspectRatio);
 
 /**
 * @brief Function to pad a frame.
@@ -49,7 +49,7 @@
  * @param cache operation requires intermediate data container.
  * @param destSize size of the destination frame
  */
-void ResizeWithPad(const cv::Mat& frame, cv::Mat& dest, cv::Mat& cache, const od::Size& destSize);
+void ResizeWithPad(const cv::Mat& frame, cv::Mat& dest, cv::Mat& cache, const common::Size& destSize);
 
 /**
 * @brief Function to retrieve the cv::scalar color from a RGB tuple.
diff --git a/samples/ObjectDetection/include/NetworkPipeline.hpp b/samples/ObjectDetection/include/ObjectDetectionPipeline.hpp
similarity index 88%
rename from samples/ObjectDetection/include/NetworkPipeline.hpp
rename to samples/ObjectDetection/include/ObjectDetectionPipeline.hpp
index c3408b4..38de65b 100644
--- a/samples/ObjectDetection/include/NetworkPipeline.hpp
+++ b/samples/ObjectDetection/include/ObjectDetectionPipeline.hpp
@@ -27,7 +27,7 @@
      * @param executor - unique pointer to inference runner
      * @param decoder - unique pointer to inference results decoder
      */
-    ObjDetectionPipeline(std::unique_ptr<ArmnnNetworkExecutor> executor,
+    ObjDetectionPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
                          std::unique_ptr<IDetectionResultDecoder> decoder);
 
     /**
@@ -48,7 +48,7 @@
      * @param[in] processed - input inference data. Data type should be aligned with input tensor.
      * @param[out] result - raw floating point inference results.
      */
-    virtual void Inference(const cv::Mat& processed, InferenceResults& result);
+    virtual void Inference(const cv::Mat& processed, common::InferenceResults<float>& result);
 
     /**
      * @brief Standard inference results post-processing implementation.
@@ -58,13 +58,13 @@
      * @param[in] inferenceResult - inference results to be decoded.
      * @param[in] callback - a function to be called after successful inference results decoding.
      */
-    virtual void PostProcessing(InferenceResults& inferenceResult,
+    virtual void PostProcessing(common::InferenceResults<float>& inferenceResult,
                                 const std::function<void (DetectedObjects)>& callback);
 
 protected:
-    std::unique_ptr<ArmnnNetworkExecutor> m_executor;
+    std::unique_ptr<common::ArmnnNetworkExecutor<float>> m_executor;
     std::unique_ptr<IDetectionResultDecoder> m_decoder;
-    Size m_inputImageSize{};
+    common::Size m_inputImageSize{};
     cv::Mat m_processedFrame;
 };
 
@@ -85,7 +85,7 @@
      * @param ClsThreshold[in] -  class probability threshold for decoding step
      * @param ObjectThreshold[in] - detected object score threshold for decoding step
      */
-    YoloV3Tiny(std::unique_ptr<ArmnnNetworkExecutor> executor,
+    YoloV3Tiny(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
                float NMSThreshold, float ClsThreshold, float ObjectThreshold);
 
     /**
@@ -116,7 +116,7 @@
      * @param[in] - unique pointer to inference runner
      * @paramp[in] objectThreshold - detected object score threshold for decoding step
      */
-    MobileNetSSDv1(std::unique_ptr<ArmnnNetworkExecutor> executor,
+    MobileNetSSDv1(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
                    float objectThreshold);
 
     /**
@@ -143,6 +143,6 @@
  *
  * @return unique pointer to object detection pipeline.
  */
-IPipelinePtr CreatePipeline(od::ODPipelineOptions& config);
+IPipelinePtr CreatePipeline(common::PipelineOptions& config);
 
 }// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/SSDResultDecoder.hpp b/samples/ObjectDetection/include/SSDResultDecoder.hpp
index 65afb8d..4c703c1 100644
--- a/samples/ObjectDetection/include/SSDResultDecoder.hpp
+++ b/samples/ObjectDetection/include/SSDResultDecoder.hpp
@@ -21,9 +21,9 @@
      */
     SSDResultDecoder(float ObjectThreshold);
 
-    DetectedObjects Decode(const InferenceResults& results,
-                           const Size& outputFrameSize,
-                           const Size& resizedFrameSize,
+    DetectedObjects Decode(const common::InferenceResults<float>& results,
+                           const common::Size& outputFrameSize,
+                           const common::Size& resizedFrameSize,
                            const std::vector<std::string>& labels) override;
 
 private:
diff --git a/samples/ObjectDetection/include/YoloResultDecoder.hpp b/samples/ObjectDetection/include/YoloResultDecoder.hpp
index 98435e3..ae6cb5e 100644
--- a/samples/ObjectDetection/include/YoloResultDecoder.hpp
+++ b/samples/ObjectDetection/include/YoloResultDecoder.hpp
@@ -26,9 +26,9 @@
      */
     YoloResultDecoder(float NMSThreshold, float ClsThreshold, float ObjectThreshold);
 
-    DetectedObjects Decode(const InferenceResults& results,
-                           const Size& outputFrameSize,
-                           const Size& resizedFrameSize,
+    DetectedObjects Decode(const common::InferenceResults<float>& results,
+                           const common::Size& outputFrameSize,
+                           const common::Size& resizedFrameSize,
                            const std::vector <std::string>& labels) override;
 private:
     float m_NmsThreshold;
diff --git a/samples/ObjectDetection/src/ArmnnNetworkExecutor.cpp b/samples/ObjectDetection/src/ArmnnNetworkExecutor.cpp
deleted file mode 100644
index cb4c0c9..0000000
--- a/samples/ObjectDetection/src/ArmnnNetworkExecutor.cpp
+++ /dev/null
@@ -1,140 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ArmnnNetworkExecutor.hpp"
-#include "Types.hpp"
-
-#include <random>
-#include <string>
-
-namespace od
-{
-
-armnn::DataType ArmnnNetworkExecutor::GetInputDataType() const
-{
-    return m_inputBindingInfo.second.GetDataType();
-}
-
-ArmnnNetworkExecutor::ArmnnNetworkExecutor(std::string& modelPath,
-                                           std::vector<armnn::BackendId>& preferredBackends)
-: m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions()))
-{
-    // Import the TensorFlow lite model.
-    armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
-    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
-
-    std::vector<std::string> inputNames = parser->GetSubgraphInputTensorNames(0);
-
-    m_inputBindingInfo = parser->GetNetworkInputBindingInfo(0, inputNames[0]);
-
-    m_outputLayerNamesList = parser->GetSubgraphOutputTensorNames(0);
-
-    std::vector<armnn::BindingPointInfo> outputBindings;
-    for(const std::string& name : m_outputLayerNamesList)
-    {
-        m_outputBindingInfo.push_back(std::move(parser->GetNetworkOutputBindingInfo(0, name)));
-    }
-
-    std::vector<std::string> errorMessages;
-    // optimize the network.
-    armnn::IOptimizedNetworkPtr optNet = Optimize(*network,
-                                                  preferredBackends,
-                                                  m_Runtime->GetDeviceSpec(),
-                                                  armnn::OptimizerOptions(),
-                                                  armnn::Optional<std::vector<std::string>&>(errorMessages));
-
-    if (!optNet)
-    {
-        const std::string errorMessage{"ArmnnNetworkExecutor: Failed to optimize network"};
-        ARMNN_LOG(error) << errorMessage;
-        throw armnn::Exception(errorMessage);
-    }
-
-    // Load the optimized network onto the m_Runtime device
-    std::string errorMessage;
-    if (armnn::Status::Success != m_Runtime->LoadNetwork(m_NetId, std::move(optNet), errorMessage))
-    {
-        ARMNN_LOG(error) << errorMessage;
-    }
-
-    //pre-allocate memory for output (the size of it never changes)
-    for (int it = 0; it < m_outputLayerNamesList.size(); ++it)
-    {
-        const armnn::DataType dataType = m_outputBindingInfo[it].second.GetDataType();
-        const armnn::TensorShape& tensorShape = m_outputBindingInfo[it].second.GetShape();
-
-        InferenceResult oneLayerOutResult;
-        switch (dataType)
-        {
-            case armnn::DataType::Float32:
-            {
-                oneLayerOutResult.resize(tensorShape.GetNumElements(), 0);
-                break;
-            }
-            default:
-            {
-                errorMessage = "ArmnnNetworkExecutor: unsupported output tensor data type";
-                ARMNN_LOG(error) << errorMessage << " " << log_as_int(dataType);
-                throw armnn::Exception(errorMessage);
-            }
-        }
-
-        m_OutputBuffer.emplace_back(oneLayerOutResult);
-
-        // Make ArmNN output tensors
-        m_OutputTensors.reserve(m_OutputBuffer.size());
-        for (size_t it = 0; it < m_OutputBuffer.size(); ++it)
-        {
-            m_OutputTensors.emplace_back(std::make_pair(
-                    m_outputBindingInfo[it].first,
-                    armnn::Tensor(m_outputBindingInfo[it].second,
-                                  m_OutputBuffer.at(it).data())
-            ));
-        }
-    }
-
-}
-
-void ArmnnNetworkExecutor::PrepareTensors(const void* inputData, const size_t dataBytes)
-{
-    assert(m_inputBindingInfo.second.GetNumBytes() >= dataBytes);
-    m_InputTensors.clear();
-    m_InputTensors = {{ m_inputBindingInfo.first, armnn::ConstTensor(m_inputBindingInfo.second, inputData)}};
-}
-
-bool ArmnnNetworkExecutor::Run(const void* inputData, const size_t dataBytes, InferenceResults& outResults)
-{
-    /* Prepare tensors if they are not ready */
-    ARMNN_LOG(debug) << "Preparing tensors...";
-    this->PrepareTensors(inputData, dataBytes);
-    ARMNN_LOG(trace) << "Running inference...";
-
-    armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetId, m_InputTensors, m_OutputTensors);
-
-    std::stringstream inferenceFinished;
-    inferenceFinished << "Inference finished with code {" << log_as_int(ret) << "}\n";
-
-    ARMNN_LOG(trace) << inferenceFinished.str();
-
-    if (ret == armnn::Status::Failure)
-    {
-        ARMNN_LOG(error) << "Failed to perform inference.";
-    }
-
-    outResults.reserve(m_outputLayerNamesList.size());
-    outResults = m_OutputBuffer;
-
-    return (armnn::Status::Success == ret);
-}
-
-Size ArmnnNetworkExecutor::GetImageAspectRatio()
-{
-    const auto shape = m_inputBindingInfo.second.GetShape();
-    assert(shape.GetNumDimensions() == 4);
-    armnnUtils::DataLayoutIndexed nhwc(armnn::DataLayout::NHWC);
-    return Size(shape[nhwc.GetWidthIndex()],
-                shape[nhwc.GetHeightIndex()]);
-}
-}// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/src/ImageUtils.cpp b/samples/ObjectDetection/src/ImageUtils.cpp
index 9a3ed17..05b8a66 100644
--- a/samples/ObjectDetection/src/ImageUtils.cpp
+++ b/samples/ObjectDetection/src/ImageUtils.cpp
@@ -15,7 +15,7 @@
 }
 
 void AddInferenceOutputToFrame(od::DetectedObjects& decodedResults, cv::Mat& inputFrame,
-                               std::vector<std::tuple<std::string, od::BBoxColor>>& labels)
+                               std::vector<std::tuple<std::string, common::BBoxColor>>& labels)
 {
     for(const od::DetectedObject& object : decodedResults)
     {
@@ -86,7 +86,7 @@
 }
 
 
-void ResizeFrame(const cv::Mat& frame, cv::Mat& dest, const od::Size& aspectRatio)
+void ResizeFrame(const cv::Mat& frame, cv::Mat& dest, const common::Size& aspectRatio)
 {
     if(&dest != &frame)
     {
@@ -119,7 +119,7 @@
     }
 }
 
-void ResizeWithPad(const cv::Mat& frame, cv::Mat& dest, cv::Mat& cache, const od::Size& destSize)
+void ResizeWithPad(const cv::Mat& frame, cv::Mat& dest, cv::Mat& cache, const common::Size& destSize)
 {
     ResizeFrame(frame, cache, destSize);
     PadFrame(cache, dest,destSize.m_Height - cache.rows,destSize.m_Width - cache.cols);
diff --git a/samples/ObjectDetection/src/Main.cpp b/samples/ObjectDetection/src/Main.cpp
index 10abb65..e057981 100644
--- a/samples/ObjectDetection/src/Main.cpp
+++ b/samples/ObjectDetection/src/Main.cpp
@@ -6,7 +6,7 @@
 #include "CvVideoFrameReader.hpp"
 #include "CvWindowOutput.hpp"
 #include "CvVideoFileWriter.hpp"
-#include "NetworkPipeline.hpp"
+#include "ObjectDetectionPipeline.hpp"
 #include "CmdArgsParser.hpp"
 
 #include <fstream>
@@ -14,6 +14,30 @@
 #include <map>
 #include <random>
 
+const std::string MODEL_NAME = "--model-name";
+const std::string VIDEO_FILE_PATH = "--video-file-path";
+const std::string MODEL_FILE_PATH = "--model-file-path";
+const std::string OUTPUT_VIDEO_FILE_PATH = "--output-video-file-path";
+const std::string LABEL_PATH = "--label-path";
+const std::string PREFERRED_BACKENDS = "--preferred-backends";
+const std::string HELP = "--help";
+
+/*
+ * The accepted options for this Object detection executable
+ */
+static std::map<std::string, std::string> CMD_OPTIONS = {
+        {VIDEO_FILE_PATH, "[REQUIRED] Path to the video file to run object detection on"},
+        {MODEL_FILE_PATH, "[REQUIRED] Path to the Object Detection model to use"},
+        {LABEL_PATH, "[REQUIRED] Path to the label set for the provided model file. "
+                     "Label file is should just be an ordered list, seperated by new line."},
+        {MODEL_NAME, "[REQUIRED] The name of the model being used. Accepted options: YOLO_V3_TINY, SSD_MOBILE"},
+        {OUTPUT_VIDEO_FILE_PATH, "[OPTIONAL] Path to the output video file with detections added in. "
+                                 "If specified will save file to disk, else displays the output to screen"},
+        {PREFERRED_BACKENDS, "[OPTIONAL] Takes the preferred backends in preference order, separated by comma."
+                             " For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]."
+                             " Defaults to CpuAcc,CpuRef"}
+};
+
 /*
  * Reads the user supplied backend preference, splits it by comma, and returns an ordered vector
  */
@@ -34,10 +58,10 @@
 /*
  * Assigns a color to each label in the label set
  */
-std::vector<std::tuple<std::string, od::BBoxColor>> AssignColourToLabel(const std::string& pathToLabelFile)
+std::vector<std::tuple<std::string, common::BBoxColor>> AssignColourToLabel(const std::string& pathToLabelFile)
 {
     std::ifstream in(pathToLabelFile);
-    std::vector<std::tuple<std::string, od::BBoxColor>> labels;
+    std::vector<std::tuple<std::string, common::BBoxColor>> labels;
 
     std::string str;
     std::default_random_engine generator;
@@ -47,7 +71,7 @@
     {
         if(!str.empty())
         {
-            od::BBoxColor c{
+            common::BBoxColor c{
                 .colorCode = std::make_tuple(distribution(generator),
                                              distribution(generator),
                                              distribution(generator))
@@ -60,13 +84,13 @@
     return labels;
 }
 
-std::tuple<std::unique_ptr<od::IFrameReader<cv::Mat>>,
-           std::unique_ptr<od::IFrameOutput<cv::Mat>>>
+std::tuple<std::unique_ptr<common::IFrameReader<cv::Mat>>,
+           std::unique_ptr<common::IFrameOutput<cv::Mat>>>
            GetFrameSourceAndSink(const std::map<std::string, std::string>& options) {
 
-    std::unique_ptr<od::IFrameReader<cv::Mat>> readerPtr;
+    std::unique_ptr<common::IFrameReader<cv::Mat>> readerPtr;
 
-    std::unique_ptr<od::CvVideoFrameReader> reader = std::make_unique<od::CvVideoFrameReader>();
+    std::unique_ptr<common::CvVideoFrameReader> reader = std::make_unique<common::CvVideoFrameReader>();
     reader->Init(GetSpecifiedOption(options, VIDEO_FILE_PATH));
 
     auto enc = reader->GetSourceEncodingInt();
@@ -75,7 +99,7 @@
     auto h = reader->GetSourceHeight();
     if (!reader->ConvertToRGB())
     {
-        readerPtr = std::move(std::make_unique<od::CvVideoFrameReaderRgbWrapper>(std::move(reader)));
+        readerPtr = std::move(std::make_unique<common::CvVideoFrameReaderRgbWrapper>(std::move(reader)));
     }
     else
     {
@@ -85,14 +109,14 @@
     if(CheckOptionSpecified(options, OUTPUT_VIDEO_FILE_PATH))
     {
         std::string outputVideo = GetSpecifiedOption(options, OUTPUT_VIDEO_FILE_PATH);
-        auto writer = std::make_unique<od::CvVideoFileWriter>();
+        auto writer = std::make_unique<common::CvVideoFileWriter>();
         writer->Init(outputVideo, enc, fps, w, h);
 
         return std::make_tuple<>(std::move(readerPtr), std::move(writer));
     }
     else
     {
-        auto writer = std::make_unique<od::CvWindowOutput>();
+        auto writer = std::make_unique<common::CvWindowOutput>();
         writer->Init("Processed Video");
         return std::make_tuple<>(std::move(readerPtr), std::move(writer));
     }
@@ -109,7 +133,7 @@
     }
 
     // Create the network options
-    od::ODPipelineOptions pipelineOptions;
+    common::PipelineOptions pipelineOptions;
     pipelineOptions.m_ModelFilePath = GetSpecifiedOption(options, MODEL_FILE_PATH);
     pipelineOptions.m_ModelName = GetSpecifiedOption(options, MODEL_NAME);
 
@@ -127,8 +151,8 @@
     od::IPipelinePtr objectDetectionPipeline = od::CreatePipeline(pipelineOptions);
 
     auto inputAndOutput = GetFrameSourceAndSink(options);
-    std::unique_ptr<od::IFrameReader<cv::Mat>> reader = std::move(std::get<0>(inputAndOutput));
-    std::unique_ptr<od::IFrameOutput<cv::Mat>> sink = std::move(std::get<1>(inputAndOutput));
+    std::unique_ptr<common::IFrameReader<cv::Mat>> reader = std::move(std::get<0>(inputAndOutput));
+    std::unique_ptr<common::IFrameOutput<cv::Mat>> sink = std::move(std::get<1>(inputAndOutput));
 
     if (!sink->IsReady())
     {
@@ -136,7 +160,7 @@
         return 1;
     }
 
-    od::InferenceResults results;
+    common::InferenceResults<float> results;
 
     std::shared_ptr<cv::Mat> frame = reader->ReadFrame();
 
diff --git a/samples/ObjectDetection/src/NetworkPipeline.cpp b/samples/ObjectDetection/src/ObjectDetectionPipeline.cpp
similarity index 82%
rename from samples/ObjectDetection/src/NetworkPipeline.cpp
rename to samples/ObjectDetection/src/ObjectDetectionPipeline.cpp
index 7f05882..077caa4 100644
--- a/samples/ObjectDetection/src/NetworkPipeline.cpp
+++ b/samples/ObjectDetection/src/ObjectDetectionPipeline.cpp
@@ -3,23 +3,23 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include "NetworkPipeline.hpp"
+#include "ObjectDetectionPipeline.hpp"
 #include "ImageUtils.hpp"
 
 namespace od
 {
 
-ObjDetectionPipeline::ObjDetectionPipeline(std::unique_ptr<ArmnnNetworkExecutor> executor,
+ObjDetectionPipeline::ObjDetectionPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
                                            std::unique_ptr<IDetectionResultDecoder> decoder) :
         m_executor(std::move(executor)),
         m_decoder(std::move(decoder)){}
 
-void od::ObjDetectionPipeline::Inference(const cv::Mat& processed, InferenceResults& result)
+void od::ObjDetectionPipeline::Inference(const cv::Mat& processed, common::InferenceResults<float>& result)
 {
     m_executor->Run(processed.data, processed.total() * processed.elemSize(), result);
 }
 
-void ObjDetectionPipeline::PostProcessing(InferenceResults& inferenceResult,
+void ObjDetectionPipeline::PostProcessing(common::InferenceResults<float>& inferenceResult,
         const std::function<void (DetectedObjects)>& callback)
 {
     DetectedObjects detections = m_decoder->Decode(inferenceResult, m_inputImageSize,
@@ -37,7 +37,7 @@
     ResizeWithPad(frame, processed, m_processedFrame, m_executor->GetImageAspectRatio());
 }
 
-MobileNetSSDv1::MobileNetSSDv1(std::unique_ptr<ArmnnNetworkExecutor> executor,
+MobileNetSSDv1::MobileNetSSDv1(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
                                float objectThreshold) :
         ObjDetectionPipeline(std::move(executor),
                              std::make_unique<SSDResultDecoder>(objectThreshold))
@@ -53,7 +53,7 @@
     }
 }
 
-YoloV3Tiny::YoloV3Tiny(std::unique_ptr<ArmnnNetworkExecutor> executor,
+YoloV3Tiny::YoloV3Tiny(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
                        float NMSThreshold, float ClsThreshold, float ObjectThreshold) :
         ObjDetectionPipeline(std::move(executor),
                              std::move(std::make_unique<YoloResultDecoder>(NMSThreshold,
@@ -70,9 +70,9 @@
     }
 }
 
-IPipelinePtr CreatePipeline(od::ODPipelineOptions& config)
+IPipelinePtr CreatePipeline(common::PipelineOptions& config)
 {
-    auto executor = std::make_unique<od::ArmnnNetworkExecutor>(config.m_ModelFilePath, config.m_backends);
+    auto executor = std::make_unique<common::ArmnnNetworkExecutor<float>>(config.m_ModelFilePath, config.m_backends);
 
     if (config.m_ModelName == "SSD_MOBILE")
     {
diff --git a/samples/ObjectDetection/src/SSDResultDecoder.cpp b/samples/ObjectDetection/src/SSDResultDecoder.cpp
index a331921..6dfd1ab 100644
--- a/samples/ObjectDetection/src/SSDResultDecoder.cpp
+++ b/samples/ObjectDetection/src/SSDResultDecoder.cpp
@@ -12,9 +12,9 @@
 namespace od
 {
 
-DetectedObjects SSDResultDecoder::Decode(const InferenceResults& networkResults,
-    const Size& outputFrameSize,
-    const Size& resizedFrameSize,
+DetectedObjects SSDResultDecoder::Decode(const common::InferenceResults<float>& networkResults,
+    const common::Size& outputFrameSize,
+    const common::Size& resizedFrameSize,
     const std::vector<std::string>& labels)
 {
     // SSD network outputs 4 tensors: bounding boxes, labels, probabilities, number of detections.
diff --git a/samples/ObjectDetection/src/YoloResultDecoder.cpp b/samples/ObjectDetection/src/YoloResultDecoder.cpp
index ffbf7cb..f177802 100644
--- a/samples/ObjectDetection/src/YoloResultDecoder.cpp
+++ b/samples/ObjectDetection/src/YoloResultDecoder.cpp
@@ -13,9 +13,9 @@
 namespace od
 {
 
-DetectedObjects YoloResultDecoder::Decode(const InferenceResults& networkResults,
-                                         const Size& outputFrameSize,
-                                         const Size& resizedFrameSize,
+DetectedObjects YoloResultDecoder::Decode(const common::InferenceResults<float>& networkResults,
+                                         const common::Size& outputFrameSize,
+                                         const common::Size& resizedFrameSize,
                                          const std::vector<std::string>& labels)
 {
 
@@ -33,7 +33,7 @@
     DetectedObjects detectedObjects;
     DetectedObjects resultsAfterNMS;
 
-    for (const InferenceResult& result : networkResults)
+    for (const common::InferenceResult<float>& result : networkResults)
     {
         for (unsigned int i = 0; i < m_numBoxes; ++i)
         {
diff --git a/samples/ObjectDetection/test/FrameReaderTest.cpp b/samples/ObjectDetection/test/FrameReaderTest.cpp
index a4bda22..a02fa7f 100644
--- a/samples/ObjectDetection/test/FrameReaderTest.cpp
+++ b/samples/ObjectDetection/test/FrameReaderTest.cpp
@@ -20,7 +20,7 @@
         std::string file =  testResources + "/" + "Megamind.avi";
         WHEN("Frame reader is initialised") {
 
-            od::CvVideoFrameReader reader;
+            common::CvVideoFrameReader reader;
             THEN("no exception is thrown") {
                 reader.Init(file);
 
@@ -92,7 +92,7 @@
 
         WHEN("Frame reader is initialised") {
 
-            od::CvVideoFrameReader reader;
+            common::CvVideoFrameReader reader;
 
             THEN("exception is thrown") {
                 REQUIRE_THROWS(reader.Init(file));
diff --git a/samples/ObjectDetection/test/ImageUtilsTest.cpp b/samples/ObjectDetection/test/ImageUtilsTest.cpp
index e486ae1..4490cff 100644
--- a/samples/ObjectDetection/test/ImageUtilsTest.cpp
+++ b/samples/ObjectDetection/test/ImageUtilsTest.cpp
@@ -96,9 +96,9 @@
 
     std::string testResources = TEST_RESOURCE_DIR;
     REQUIRE(testResources != "");
-    std::vector<std::tuple<std::string, od::BBoxColor>> labels;
+    std::vector<std::tuple<std::string, common::BBoxColor>> labels;
 
-    od::BBoxColor c
+    common::BBoxColor c
     {
         .colorCode = std::make_tuple (0, 0, 255)
     };
diff --git a/samples/ObjectDetection/test/PipelineTest.cpp b/samples/ObjectDetection/test/PipelineTest.cpp
index 289f44f..bc5824e 100644
--- a/samples/ObjectDetection/test/PipelineTest.cpp
+++ b/samples/ObjectDetection/test/PipelineTest.cpp
@@ -4,7 +4,7 @@
 //
 #include <catch.hpp>
 #include <opencv2/opencv.hpp>
-#include <NetworkPipeline.hpp>
+#include "ObjectDetectionPipeline.hpp"
 #include "Types.hpp"
 
 static std::string GetResourceFilePath(const std::string& filename)
@@ -32,14 +32,14 @@
     std::string testResources = TEST_RESOURCE_DIR;
     REQUIRE(testResources != "");
     // Create the network options
-    od::ODPipelineOptions options;
+    common::PipelineOptions options;
     options.m_ModelFilePath = GetResourceFilePath("detect.tflite");
     options.m_ModelName = "SSD_MOBILE";
     options.m_backends = {"CpuAcc", "CpuRef"};
 
     od::IPipelinePtr objectDetectionPipeline = od::CreatePipeline(options);
 
-    od::InferenceResults results;
+    common::InferenceResults<float> results;
     cv::Mat processed;
     cv::Mat inputFrame = cv::imread(GetResourceFilePath("basketball1.png"), cv::IMREAD_COLOR);
     cv::cvtColor(inputFrame, inputFrame, cv::COLOR_BGR2RGB);
diff --git a/samples/SpeechRecognition/CMakeLists.txt b/samples/SpeechRecognition/CMakeLists.txt
new file mode 100644
index 0000000..6c6b0b6
--- /dev/null
+++ b/samples/SpeechRecognition/CMakeLists.txt
@@ -0,0 +1,62 @@
+# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+
+cmake_minimum_required(VERSION 3.0.2)
+
+set(CMAKE_C_STANDARD                99)
+set(CMAKE_CXX_STANDARD              14)
+
+# Make the standard a requirement => prevent fallback to previous
+# supported standard
+set(CMAKE_C_STANDARD_REQUIRED       ON)
+set(CMAKE_CXX_STANDARD_REQUIRED     ON)
+
+# We want to pass standard C/C++ flags, without gnu extensions
+set(CMAKE_C_EXTENSIONS              OFF)
+set(CMAKE_CXX_EXTENSIONS            OFF)
+
+project (speech-recognition-example)
+
+set(CMAKE_C_FLAGS_DEBUG         "-DDEBUG -O0 -g -fPIC -pthread")
+set(CMAKE_C_FLAGS_RELEASE       "-DNDEBUG -O3 -fPIC -pthread")
+
+set(CMAKE_CXX_FLAGS_DEBUG       "-DDEBUG -O0 -g -fPIC -pthread")
+set(CMAKE_CXX_FLAGS_RELEASE     "-DNDEBUG -O3 -fPIC -pthread")
+
+include(ExternalProject)
+
+# Build in release mode by default
+if (NOT CMAKE_BUILD_TYPE STREQUAL Debug)
+    set(CMAKE_BUILD_TYPE Release CACHE INTERNAL "")
+endif()
+
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+
+if (NOT DEFINED DEPENDENCIES_DIR)
+    set(DEPENDENCIES_DIR ${CMAKE_BINARY_DIR}/dependencies)
+endif()
+
+include(../common/cmake/find_armnn.cmake)
+
+include_directories(include)
+include_directories(../common/include/ArmnnUtils)
+include_directories(../common/include/Utils)
+
+file(GLOB SOURCES "src/*.cpp")
+file(GLOB COMMON_UTILS_SOURCES "../common/src/Utils/*.cpp")
+list(REMOVE_ITEM SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/Main.cpp)
+file(GLOB TEST_SOURCES "test/*.cpp")
+file(GLOB APP_MAIN "src/Main.cpp")
+
+if(BUILD_UNIT_TESTS)
+    include(cmake/unit_tests.cmake)
+endif()
+
+set(APP_TARGET_NAME "${CMAKE_PROJECT_NAME}")
+
+add_executable("${APP_TARGET_NAME}"  ${COMMON_UTILS_SOURCES} ${SOURCES} ${APP_MAIN})
+
+target_link_libraries("${APP_TARGET_NAME}" PUBLIC ${ARMNN_LIBS} -lsndfile -lsamplerate)
+target_include_directories("${APP_TARGET_NAME}" PUBLIC ${ARMNN_INCLUDE_DIR} )
diff --git a/samples/SpeechRecognition/Readme.md b/samples/SpeechRecognition/Readme.md
new file mode 100644
index 0000000..656ba55
--- /dev/null
+++ b/samples/SpeechRecognition/Readme.md
@@ -0,0 +1,245 @@
+# Speech Recognition Example
+
+## Introduction
+This sample application shows automatic speech recognition using the Arm NN public C++ API. The compiled application takes
+
+ * an audio file
+
+as input and produces
+
+ * recognised text on the console
+
+as output.
+
+## Dependencies
+
+This example utilises the `libsndfile`, `libasound` and `libsamplerate` libraries to capture the raw audio data from file, and to
+re-sample it to the expected sample rate. The top-level inference API is provided by the Arm NN library.
+
+### Arm NN
+
+The Speech Recognition example build system does not trigger Arm NN compilation. Thus, before building the application,
+please ensure that the Arm NN libraries and header files are available on your build platform.
+The application executable binary dynamically links with the following Arm NN libraries:
+* libarmnn.so
+* libarmnnTfLiteParser.so
+
+The build script searches for available Arm NN libraries in the following order:
+1. Inside custom user directory specified by ARMNN_LIB_DIR cmake option.
+2. Inside the current Arm NN repository, assuming that Arm NN was built following [these instructions](../../BuildGuideCrossCompilation.md).
+3. Inside default locations for system libraries, assuming Arm NN was installed from deb packages.
+
+Arm NN header files are searched for in the parent directory of the found library files, under an `include` directory; i.e. for
+libraries found in `/usr/lib` or `/usr/lib64`, header files are expected in `/usr/include` (or `${ARMNN_LIB_DIR}/include`).
+
+Please see [find_armnn.cmake](../common/cmake/find_armnn.cmake) for implementation details.
+
+## Building
+There is one flow for building this application:
+* native build on a host platform
+
+### Build Options
+* ARMNN_LIB_DIR - points to a custom location of the Arm NN libs and headers.
+* BUILD_UNIT_TESTS - set to `1` to build tests. In addition to the main application, a `speech-recognition-example-tests`
+unit test executable will be created.
+
+### Native Build
+To build this application on a host platform, first ensure that the required dependencies are installed.
+For example, on a Raspberry Pi:
+```commandline
+sudo apt-get update
+sudo apt-get -yq install libsndfile1-dev
+sudo apt-get -yq install libasound2-dev
+sudo apt-get -yq install libsamplerate-dev
+```
+
+To build the demo application, create a build directory:
+```commandline
+mkdir build
+cd build
+```
+If you have already installed Arm NN and the required libraries:
+
+Inside the build directory, run the cmake and make commands:
+```commandline
+cmake  ..
+make
+```
+This will build the following in the bin directory:
+* `speech-recognition-example` - application executable
+
+If you have a custom Arm NN location, use the `ARMNN_LIB_DIR` option:
+```commandline
+cmake  -DARMNN_LIB_DIR=/path/to/armnn ..
+make
+```
+## Executing
+
+Once the application executable is built, it can be executed with the following options:
+* --audio-file-path: Path to the audio file to run speech recognition on **[REQUIRED]**
+* --model-file-path: Path to the Speech Recognition model to use **[REQUIRED]**
+
+* --preferred-backends: Takes the preferred backends in preference order, separated by comma.
+                        For example: `CpuAcc,GpuAcc,CpuRef`. Accepted options: [`CpuAcc`, `CpuRef`, `GpuAcc`].
+                        Defaults to `CpuRef` **[OPTIONAL]**
+
+### Speech Recognition on a supplied audio file
+
+To run speech recognition on a supplied audio file and output the result to console:
+```commandline
+./speech-recognition-example --audio-file-path /path/to/audio/file --model-file-path /path/to/model/file
+```
+---
+
+# Application Overview
+This section provides a walkthrough of the application, explaining in detail the steps:
+1. Initialisation
+    1. Reading from Audio Source
+2. Creating a Network
+    1. Creating Parser and Importing Graph
+    2. Optimizing Graph for Compute Device
+    3. Creating Input and Output Binding Information
+3. Speech Recognition pipeline
+    1. Pre-processing the Captured Audio
+    2. Making Input and Output Tensors
+    3. Executing Inference
+    4. Postprocessing
+    5. Decoding and Processing Inference Output
+
+### Initialisation
+
+##### Reading from Audio Source
+After parsing user arguments, the chosen audio file is loaded into an AudioCapture object.
+We use [`AudioCapture`](./include/AudioCapture.hpp) in our main function to capture appropriately sized audio blocks from the source using the
+`Next()` function.
+
+The `AudioCapture` object also re-samples the audio input to a desired sample rate, and sets the number of channels used to one channel (i.e. `mono`).
+
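+A minimal sketch of how the main function might drive `AudioCapture` is shown below; `audioFilePath`, the window
+length and the stride are illustrative placeholders, and the real values come from the parsed options and the model.
+
+```c++
+#include "AudioCapture.hpp"
+
+asr::AudioCapture capture;
+std::vector<float> audioData = capture.LoadAudioFile(audioFilePath);
+
+// Illustrative window parameters; the model dictates the real ones.
+capture.InitSlidingWindow(audioData.data(), audioData.size(), 16000, 8000);
+
+while (capture.HasNext())
+{
+    std::vector<float> audioBlock = capture.Next();
+    // ... pre-process audioBlock, run inference, post-process ...
+}
+```
+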
+### Creating a Network
+
+All operations with Arm NN and networks are encapsulated in the [`ArmnnNetworkExecutor`](./include/ArmnnNetworkExecutor.hpp)
+class.
+
+##### Creating Parser and Importing Graph
+The first step with Arm NN SDK is to import a graph from file by using the appropriate parser.
+
+The Arm NN SDK provides parsers for reading graphs from a variety of model formats. In our application we specifically
+focus on `.tflite, .pb, .onnx` models.
+
+Based on the extension of the provided model file, the corresponding parser is created and the network file loaded with
+`CreateNetworkFromBinaryFile()` method. The parser will handle the creation of the underlying Arm NN graph.
+
+The current example accepts tflite format model files, so we use `ITfLiteParser`:
+```c++
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+
+armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
+armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
+```
+
+##### Optimizing Graph for Compute Device
+Arm NN supports optimized execution on multiple CPU and GPU devices. Prior to executing a graph, we must select the
+appropriate device context. We do this by creating a runtime context with default options with `IRuntime()`.
+
+For example:
+```c++
+#include "armnn/ArmNN.hpp"
+
+auto runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
+```
+
+We can optimize the imported graph by specifying a list of backends in order of preference and implement
+backend-specific optimizations. The backends are identified by a string unique to the backend,
+for example `CpuAcc, GpuAcc, CpuRef`.
+
+For example:
+```c++
+std::vector<armnn::BackendId> backends{"CpuAcc", "GpuAcc", "CpuRef"};
+```
+
+Internally and transparently, Arm NN splits the graph into subgraphs based on backends, calls an optimize-subgraphs
+function on each of them and, if possible, substitutes the corresponding subgraph in the original graph with
+its optimized version.
+
+Using the `Optimize()` function we optimize the graph for inference and load the optimized network onto the compute
+device with `LoadNetwork()`. This function creates the backend-specific workloads
+for the layers and a backend-specific workload factory which is called to create the workloads.
+
+For example:
+```c++
+armnn::IOptimizedNetworkPtr optNet = Optimize(*network,
+                                              backends,
+                                              runtime->GetDeviceSpec(),
+                                              armnn::OptimizerOptions());
+std::string errorMessage;
+runtime->LoadNetwork(0, std::move(optNet), errorMessage);
+std::cerr << errorMessage << std::endl;
+```
+
+##### Creating Input and Output Binding Information
+Parsers can also be used to extract the input information for the network. By calling `GetSubgraphInputTensorNames`
+we extract all the input names and, with `GetNetworkInputBindingInfo`, bind the input points of the graph.
+For example:
+```c++
+std::vector<std::string> inputNames = parser->GetSubgraphInputTensorNames(0);
+auto inputBindingInfo = parser->GetNetworkInputBindingInfo(0, inputNames[0]);
+```
+The input binding information contains all the essential information about the input. It is a tuple consisting of
+integer identifiers for bindable layers (inputs, outputs) and the tensor info (data type, quantization information,
+number of dimensions, total number of elements).
+
+Similarly, we can get the output binding information for an output layer by using the parser to retrieve output
+tensor names and calling `GetNetworkOutputBindingInfo()`.
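+
+For example, a minimal sketch that follows the same pattern as the input binding above:
+```c++
+std::vector<std::string> outputNames = parser->GetSubgraphOutputTensorNames(0);
+auto outputBindingInfo = parser->GetNetworkOutputBindingInfo(0, outputNames[0]);
+
+// The tensor info half of the binding can then be queried, e.g. for its shape.
+armnn::TensorShape outputShape = outputBindingInfo.second.GetShape();
+```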
+
+### Speech Recognition pipeline
+
+The speech recognition pipeline has 3 steps: pre-processing the data, running inference and decoding the inference results
+in the post-processing step.
+
+See [`SpeechRecognitionPipeline`](include/SpeechRecognitionPipeline.hpp) for more details.
+
+#### Pre-processing the Audio Input
+Each frame captured from the source is read and stored by the AudioCapture object.
+Its `Next()` function provides us with the correctly positioned window of data, sized appropriately for the given model, to pre-process before inference.
+
+```c++
+std::vector<float> audioBlock = capture.Next();
+...
+std::vector<int8_t> preprocessedData = asrPipeline->PreProcessing<float, int8_t>(audioBlock, preprocessor);
+```
+
+The `MFCC` class is then used to extract the Mel-frequency Cepstral Coefficients (MFCCs, [see Wikipedia](https://en.wikipedia.org/wiki/Mel-frequency_cepstrum)) from each stored audio frame in the provided window of audio, to be used as features for the network. MFCCs are the result of computing the dot product of the Discrete Cosine Transform (DCT) Matrix and the log of the Mel energy.
+
+After all the MFCCs needed for an inference have been extracted from the audio data, we convolve them with 1-dimensional Savitzky-Golay filters to compute the first and second MFCC derivatives with respect to time. The MFCCs and the derivatives are concatenated to make the input tensor for the model.
+
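+As an illustration of the derivative step, the hypothetical helper below convolves a single MFCC coefficient's time
+series with the 5-point Savitzky-Golay first-derivative kernel; the window length and the zero-padded edges are
+simplifying assumptions, not the sample's exact implementation.
+
+```c++
+#include <vector>
+
+// Simplified sketch: first time-derivative of one MFCC coefficient series.
+// Kernel is the 5-point Savitzky-Golay first-derivative filter (quadratic fit).
+std::vector<float> FirstDerivative(const std::vector<float>& mfccOverTime)
+{
+    const std::vector<float> kernel { -0.2f, -0.1f, 0.0f, 0.1f, 0.2f };
+    const int half = 2;
+    std::vector<float> derivative(mfccOverTime.size(), 0.0f); // edge samples stay zero
+
+    for (int t = half; t + half < static_cast<int>(mfccOverTime.size()); ++t)
+    {
+        float sum = 0.0f;
+        for (int k = -half; k <= half; ++k)
+        {
+            sum += kernel[k + half] * mfccOverTime[t + k];
+        }
+        derivative[t] = sum;
+    }
+    return derivative;
+}
+```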
+
+#### Executing Inference
+```c++
+common::InferenceResults results;
+...
+asrPipeline->Inference<int8_t>(preprocessedData, results);
+```
+The inference step calls the `ArmnnNetworkExecutor::Run` method, which prepares the input tensors and executes inference.
+A compute device performs inference for the loaded network using the `EnqueueWorkload()` function of the runtime context.
+For example:
+```c++
+//const void* inputData = ...;
+//outputTensors were pre-allocated before
+
+armnn::InputTensors inputTensors = {{ inputBindingInfo.first,armnn::ConstTensor(inputBindingInfo.second, inputData)}};
+runtime->EnqueueWorkload(0, inputTensors, outputTensors);
+```
+We allocate memory for output data once and map it to output tensor objects. After successful inference, we read data
+from the pre-allocated output data buffer. See [`ArmnnNetworkExecutor::ArmnnNetworkExecutor`](./src/ArmnnNetworkExecutor.cpp)
+and [`ArmnnNetworkExecutor::Run`](./src/ArmnnNetworkExecutor.cpp) for more details.
+
+#### Postprocessing
+
+##### Decoding and Processing Inference Output
+The output from the inference must be decoded to obtain the recognised characters from the speech. 
+A simple greedy decoder classifies the results by taking the highest element of the output as a key for the labels dictionary. 
+The value returned is a character which is appended to a list, and the list is filtered to remove unwanted characters. 
+
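+A minimal sketch of the greedy decoding idea follows; `GreedyDecode`, its float output type and the `labels` map are
+illustrative assumptions, while the actual implementation (including character filtering) lives in
+[`Decoder`](./include/Decoder.hpp).
+
+```c++
+#include <algorithm>
+#include <map>
+#include <string>
+#include <vector>
+
+// Greedy decode: for each time step (row) pick the label with the highest score.
+std::string GreedyDecode(const std::vector<float>& output, size_t rowLength,
+                         const std::map<int, std::string>& labels)
+{
+    std::string text;
+    const size_t numRows = output.size() / rowLength;
+    for (size_t row = 0; row < numRows; ++row)
+    {
+        auto rowBegin = output.begin() + row * rowLength;
+        auto rowEnd   = rowBegin + rowLength;
+        int best = static_cast<int>(std::distance(rowBegin, std::max_element(rowBegin, rowEnd)));
+        text += labels.at(best); // highest-scoring index maps to a character
+    }
+    return text; // blanks and repeated characters still need to be filtered out
+}
+```
+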
+```c++
+asrPipeline->PostProcessing<int8_t>(results, isFirstWindow, !capture.HasNext(), currentRContext);
+```
+The produced string is displayed on the console.
\ No newline at end of file
diff --git a/samples/SpeechRecognition/cmake/unit_tests.cmake b/samples/SpeechRecognition/cmake/unit_tests.cmake
new file mode 100644
index 0000000..47c4f4b
--- /dev/null
+++ b/samples/SpeechRecognition/cmake/unit_tests.cmake
@@ -0,0 +1,34 @@
+# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+
+set(TEST_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/test/resources)
+file(MAKE_DIRECTORY ${TEST_RESOURCES_DIR})
+add_definitions (-DTEST_RESOURCE_DIR="${TEST_RESOURCES_DIR}")
+set(TEST_TARGET_NAME "${CMAKE_PROJECT_NAME}-tests")
+
+file(GLOB TEST_SOURCES "test/*")
+
+file(MAKE_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/test/resources)
+include(../common/cmake/find_catch.cmake)
+
+add_executable("${TEST_TARGET_NAME}" ${COMMON_UTILS_SOURCES} ${SOURCES} ${TEST_SOURCES} )
+
+ExternalProject_Add(passport
+        URL https://raw.githubusercontent.com/Azure-Samples/cognitive-services-speech-sdk/master/sampledata/audiofiles/myVoiceIsMyPassportVerifyMe04.wav
+        DOWNLOAD_NO_EXTRACT 1
+        CONFIGURE_COMMAND ""
+        BUILD_COMMAND ${CMAKE_COMMAND} -E copy <DOWNLOAD_DIR>/myVoiceIsMyPassportVerifyMe04.wav ${CMAKE_CURRENT_SOURCE_DIR}/test/resources
+        INSTALL_COMMAND ""
+        )
+
+add_dependencies(
+        "${TEST_TARGET_NAME}"
+        "passport"
+        "catch2-headers"
+)
+
+target_include_directories("${TEST_TARGET_NAME}" PUBLIC ${TEST_TPIP_INCLUDE}
+    ${ARMNN_INCLUDE_DIR}
+     ${DEPENDENCIES_DIR} ${TEST_RESOURCES_DIR} ${COMMON_INCLUDE_DIR})
+
+target_link_libraries("${TEST_TARGET_NAME}" PUBLIC ${ARMNN_LIBS} -lsndfile -lsamplerate)
\ No newline at end of file
diff --git a/samples/SpeechRecognition/include/AudioCapture.hpp b/samples/SpeechRecognition/include/AudioCapture.hpp
new file mode 100644
index 0000000..90c2ecc
--- /dev/null
+++ b/samples/SpeechRecognition/include/AudioCapture.hpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <string>
+#include <iostream>
+
+#include <math.h>
+
+#include <vector>
+
+#include <exception>
+
+#include "SlidingWindow.hpp"
+
+namespace asr
+{
+
+/**
+* @brief Class used to capture the audio data loaded from file, and to provide a method of
+ * extracting correctly positioned and appropriately sized audio windows
+*
+*/
+    class AudioCapture
+    {
+    public:
+
+        SlidingWindow<const float> m_window;
+        int lastReadIdx= 0;
+
+        /**
+        * @brief Default constructor
+        */
+        AudioCapture()
+        {};
+
+        /**
+        * @brief Function to load the audio data captured from the
+         * input file to memory.
+        */
+        std::vector<float> LoadAudioFile(std::string filePath);
+
+        /**
+        * @brief Function to initialize the sliding window. This will set its position in memory, its
+         * window size and its stride.
+        */
+        void InitSlidingWindow(float* data, size_t dataSize, int minSamples, size_t stride);
+
+        /**
+        * Checks whether there is another block of audio in memory to read
+        */
+        bool HasNext();
+
+        /**
+        * Retrieves the next block of audio if its available
+        */
+        std::vector<float> Next();
+    };
+} // namespace asr
\ No newline at end of file
diff --git a/samples/SpeechRecognition/include/DataStructures.hpp b/samples/SpeechRecognition/include/DataStructures.hpp
new file mode 100644
index 0000000..9922265
--- /dev/null
+++ b/samples/SpeechRecognition/include/DataStructures.hpp
@@ -0,0 +1,102 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <stdio.h>
+#include <iterator>
+
+/**
+ * Class Array2d is a data structure that represents a two dimensional array.
+ * The data is allocated in contiguous memory, arranged row-wise
+ * and individual elements can be accessed with the () operator.
+ * For example a two dimensional array D of size (M, N) can be accessed:
+ *
+ *               _|<------------- col size = N  -------->|
+ *               |  D(r=0, c=0) D(r=0, c=1)... D(r=0, c=N)
+ *               |  D(r=1, c=0) D(r=1, c=1)... D(r=1, c=N)
+ *               |  ...
+ *    row size = M  ...
+ *               |  ...
+ *               _  D(r=M, c=0) D(r=M, c=1)... D(r=M, c=N)
+ *
+ */
+template<typename T>
+class Array2d
+{
+private:
+    size_t m_rows;
+    size_t m_cols;
+    T* m_data;
+
+public:
+    /**
+     * Creates the array2d with the given sizes.
+     *
+     * @param rows  number of rows.
+     * @param cols  number of columns.
+     */
+    Array2d(unsigned rows, unsigned cols)
+    {
+        if (rows == 0 || cols == 0) {
+            printf("Array2d constructor has 0 size.\n");
+            m_data = nullptr;
+            return;
+        }
+        m_rows = rows;
+        m_cols = cols;
+        m_data = new T[rows * cols];
+    }
+
+    ~Array2d()
+    {
+        delete[] m_data;
+    }
+
+    T& operator() (unsigned int row, unsigned int col)
+    {
+        return m_data[m_cols * row + col];
+    }
+
+    T operator() (unsigned int row, unsigned int col) const
+    {
+        return m_data[m_cols * row + col];
+    }
+
+    /**
+     * Gets rows number of the current array2d.
+     * @return number of rows.
+     */
+    size_t size(size_t dim)
+    {
+        switch (dim)
+        {
+            case 0:
+                return m_rows;
+            case 1:
+                return m_cols;
+            default:
+                return 0;
+        }
+    }
+
+    /**
+     * Gets the array2d total size.
+     */
+    size_t totalSize()
+    {
+        return m_rows * m_cols;
+    }
+
+    /**
+     * array2d iterator.
+     */
+    using iterator=T*;
+    using const_iterator=T const*;
+
+    iterator begin() { return m_data; }
+    iterator end() { return m_data + totalSize(); }
+    const_iterator begin() const { return m_data; }
+    const_iterator end() const { return m_data + totalSize(); };
+};
diff --git a/samples/SpeechRecognition/include/Decoder.hpp b/samples/SpeechRecognition/include/Decoder.hpp
new file mode 100644
index 0000000..69d97cc
--- /dev/null
+++ b/samples/SpeechRecognition/include/Decoder.hpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <string>
+#include <map>
+#include <vector>
+#include <algorithm>
+#include <cmath>
+
+# pragma once
+
+namespace asr
+{
+/**
+* @brief Class used to Decode the output of the ASR inference
+*
+*/
+    class Decoder
+    {
+    public:
+        std::map<int, std::string> m_labels;
+        /**
+        * @brief Constructor
+        * @param[in] labels - map of labels to be used for decoding to text.
+        */
+        Decoder(std::map<int, std::string>& labels);
+
+        /**
+        * @brief Function to decode the output into a text string
+        * @param[in] contextToProcess - the output vector to decode.
+        * @return the decoded text.
+        */
+        template<typename T>
+        std::string DecodeOutput(std::vector<T>& contextToProcess)
+        {
+            int rowLength = 29;
+
+            std::vector<char> unfilteredText;
+
+            for(int row = 0; row < contextToProcess.size()/rowLength; ++row)
+            {
+                std::vector<int16_t> rowVector;
+                for(int j = 0; j < rowLength; ++j)
+                {
+                    rowVector.emplace_back(static_cast<int16_t>(contextToProcess[row * rowLength + j]));
+                }
+
+                int max_index = std::distance(rowVector.begin(),std::max_element(rowVector.begin(), rowVector.end()));
+                unfilteredText.emplace_back(this->m_labels.at(max_index)[0]);
+            }
+
+            std::string filteredText = FilterCharacters(unfilteredText);
+            return filteredText;
+        }
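+
+        /*
+         * Illustrative usage sketch (rawScores is a placeholder for a model output
+         * holding 29 character scores per time step; '$' entries are removed by
+         * FilterCharacters):
+         *
+         *     asr::Decoder decoder(labels);
+         *     std::vector<int8_t> rawScores = ...;
+         *     std::string text = decoder.DecodeOutput<int8_t>(rawScores);
+         */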
+
+        /**
+        * @brief Function to filter out unwanted characters
+        * @param[in] unfiltered - the unfiltered output to be processed.
+        */
+        std::string FilterCharacters(std::vector<char>& unfiltered);
+    };
+} // namespace asr
diff --git a/samples/SpeechRecognition/include/MFCC.hpp b/samples/SpeechRecognition/include/MFCC.hpp
new file mode 100644
index 0000000..14b6d9f
--- /dev/null
+++ b/samples/SpeechRecognition/include/MFCC.hpp
@@ -0,0 +1,244 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <vector>
+#include <cstdint>
+#include <cmath>
+#include <limits>
+#include <string>
+
+/* MFCC's consolidated parameters */
+class MfccParams
+{
+public:
+    float       m_samplingFreq;
+    int         m_numFbankBins;
+    float       m_melLoFreq;
+    float       m_melHiFreq;
+    int         m_numMfccFeatures;
+    int         m_frameLen;
+    int         m_frameLenPadded;
+    bool        m_useHtkMethod;
+    int         m_numMfccVectors;
+
+    /** @brief  Constructor */
+    MfccParams(const float samplingFreq, const int numFbankBins,
+               const float melLoFreq, const float melHiFreq,
+               const int numMfccFeats, const int frameLen,
+               const bool useHtkMethod, const int numMfccVectors);
+
+    /* Delete the default constructor */
+    MfccParams()  = delete;
+
+    /* Default destructor */
+    ~MfccParams() = default;
+
+    /** @brief  String representation of parameters */
+    std::string Str();
+};
+
+/**
+ * @brief   Class for MFCC feature extraction.
+ *          Based on https://github.com/ARM-software/ML-KWS-for-MCU/blob/master/Deployment/Source/MFCC/mfcc.cpp
+ *          This class is designed to be generic and self-sufficient but
+ *          certain calculation routines can be overridden to accommodate
+ *          use-case specific requirements.
+ */
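+/*
+ * Illustrative construction sketch; the parameter values mirror the Wav2Letter
+ * settings used in Main.cpp, and audioFrame stands in for one window of samples:
+ *
+ *     MfccParams params(16000.0f, 128, 0.0f, 8000.0f, 13, 512, false, 296);
+ *     MFCC mfcc(params);
+ *     mfcc.Init();
+ *     std::vector<float> features = mfcc.MfccCompute(audioFrame);
+ */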
+class MFCC
+{
+
+public:
+
+    /**
+    * @brief        Extracts MFCC features for a single small frame of
+    *               audio data e.g. 640 samples.
+    * @param[in]    audioData - Vector of audio samples to calculate
+    *               features for.
+    * @return       Vector of extracted MFCC features.
+    **/
+    std::vector<float> MfccCompute(const std::vector<float>& audioData);
+
+    MfccParams _m_params;
+
+    /**
+     * @brief       Constructor
+     * @param[in]   params - MFCC parameters
+    */
+    MFCC(const MfccParams& params);
+
+    /* Delete the default constructor */
+    MFCC() = delete;
+
+    /** @brief  Default destructor */
+    ~MFCC() = default;
+
+    /** @brief  Initialise */
+    void Init();
+
+    /**
+     * @brief        Extracts and quantises MFCC features for a single small
+     *               frame of audio data e.g. 640 samples.
+     * @param[in]    audioData - Vector of audio samples to calculate
+     *               features for.
+     * @param[in]    quantScale - quantisation scale.
+     * @param[in]    quantOffset - quantisation offset
+     * @return      Vector of extracted quantised MFCC features.
+     **/
+    template<typename T>
+    std::vector<T> MfccComputeQuant(const std::vector<float>& audioData,
+                                    const float quantScale,
+                                    const int quantOffset)
+    {
+        this->_MfccComputePreFeature(audioData);
+        float minVal = std::numeric_limits<T>::min();
+        float maxVal = std::numeric_limits<T>::max();
+
+        std::vector<T> mfccOut(this->_m_params.m_numMfccFeatures);
+        const size_t numFbankBins = this->_m_params.m_numFbankBins;
+
+        /* Take DCT. Uses matrix mul. */
+        for (size_t i = 0, j = 0; i < mfccOut.size(); ++i, j += numFbankBins)
+        {
+            float sum = 0;
+            for (size_t k = 0; k < numFbankBins; ++k)
+            {
+                sum += this->_m_dctMatrix[j + k] * this->_m_melEnergies[k];
+            }
+            /* Quantize to T. */
+            sum = std::round((sum / quantScale) + quantOffset);
+            mfccOut[i] = static_cast<T>(std::min<float>(std::max<float>(sum, minVal), maxVal));
+        }
+
+        return mfccOut;
+    }
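+
+    /*
+     * Worked example of the quantisation step above (values are illustrative):
+     * with quantScale = 0.5 and quantOffset = -5, a DCT output of 3.2 maps to
+     * round(3.2 / 0.5 + (-5)) = round(1.4) = 1, which is then clamped to the
+     * numeric limits of T (e.g. [-128, 127] for int8_t).
+     */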
+
+    /* Constants */
+    static constexpr float logStep = 1.8562979903656 / 27.0;
+    static constexpr float freqStep = 200.0 / 3;
+    static constexpr float minLogHz = 1000.0;
+    static constexpr float minLogMel = minLogHz / freqStep;
+
+protected:
+    /**
+     * @brief       Project input frequency to Mel Scale.
+     * @param[in]   freq - input frequency in floating point
+     * @param[in]   useHTKMethod - bool to signal if HTK method is to be
+     *              used for calculation
+     * @return      Mel transformed frequency in floating point
+     **/
+    static float MelScale(const float    freq,
+                          const bool     useHTKMethod = true);
+
+    /**
+     * @brief       Inverse Mel transform - convert MEL warped frequency
+     *              back to normal frequency
+     * @param[in]   melFreq - Mel frequency in floating point
+     * @param[in]   useHTKMethod - bool to signal if HTK method is to be
+     *              used for calculation
+     * @return      Real world frequency in floating point
+     **/
+    static float InverseMelScale(const float melFreq,
+                                 const bool  useHTKMethod = true);
+
+    /**
+     * @brief       Populates MEL energies after applying the MEL filter
+     *              bank weights and adding them up to be placed into
+     *              bins, according to the filter bank's first and last
+     *              indices (pre-computed for each filter bank element
+     *              by _CreateMelFilterBank function).
+     * @param[in]   fftVec                  Vector populated with FFT magnitudes
+     * @param[in]   melFilterBank           2D Vector with filter bank weights
+     * @param[in]   filterBankFilterFirst   Vector containing the first indices of filter bank
+     *                                      to be used for each bin.
+     * @param[in]   filterBankFilterLast    Vector containing the last indices of filter bank
+     *                                      to be used for each bin.
+     * @param[out]  melEnergies             Pre-allocated vector of MEL energies to be
+     *                                      populated.
+     * @return      true if successful, false otherwise
+     */
+    virtual bool ApplyMelFilterBank(
+            std::vector<float>&                 fftVec,
+            std::vector<std::vector<float>>&    melFilterBank,
+            std::vector<int32_t>&               filterBankFilterFirst,
+            std::vector<int32_t>&               filterBankFilterLast,
+            std::vector<float>&                 melEnergies);
+
+    /**
+     * @brief           Converts the Mel energies to logarithmic scale
+     * @param[in/out]   melEnergies - 1D vector of Mel energies
+     **/
+    virtual void ConvertToLogarithmicScale(std::vector<float>& melEnergies);
+
+    /**
+     * @brief       Create a matrix used to calculate Discrete Cosine
+     *              Transform.
+     * @param[in]   inputLength - input length of the buffer on which
+     *              DCT will be performed
+     * @param[in]   coefficientCount - Total coefficients per input
+     *              length
+     * @return      1D vector with inputLength x coefficientCount elements
+     *              populated with DCT coefficients.
+     */
+    virtual std::vector<float> CreateDCTMatrix(
+            const int32_t inputLength,
+            const int32_t coefficientCount);
+
+    /**
+     * @brief       Given the low and high Mel values, get the normaliser
+     *              for weights to be applied when populating the filter
+     *              bank.
+     * @param[in]   leftMel - low Mel frequency value
+     * @param[in]   rightMel - high Mel frequency value
+     * @param[in]   useHTKMethod - bool to signal if HTK method is to be
+     *              used for calculation
+     * @return      normaliser value to apply to the filter bank weights
+     */
+    virtual float GetMelFilterBankNormaliser(
+            const float&   leftMel,
+            const float&   rightMel,
+            const bool     useHTKMethod);
+
+private:
+
+    std::vector<float>              _m_frame;
+    std::vector<float>              _m_buffer;
+    std::vector<float>              _m_melEnergies;
+    std::vector<float>              _m_windowFunc;
+    std::vector<std::vector<float>> _m_melFilterBank;
+    std::vector<float>              _m_dctMatrix;
+    std::vector<int32_t>            _m_filterBankFilterFirst;
+    std::vector<int32_t>            _m_filterBankFilterLast;
+    bool                            _m_filterBankInitialised;
+
+    /**
+     * @brief       Initialises the filter banks and the DCT matrix **/
+    void _InitMelFilterBank();
+
+    /**
+     * @brief       Signals whether the instance of MFCC has had its
+     *              required buffers initialised
+     * @return      True if initialised, false otherwise
+     **/
+    bool _IsMelFilterBankInited();
+
+    /**
+     * @brief       Create mel filter banks for MFCC calculation.
+     * @return      2D vector of floats
+     **/
+    std::vector<std::vector<float>> _CreateMelFilterBank();
+
+    /**
+     * @brief       Computes and populates internal member buffers used
+     *              in MFCC feature calculation
+     * @param[in]   audioData - 1D vector of floating point audio data
+     */
+    void _MfccComputePreFeature(const std::vector<float>& audioData);
+
+    /** @brief       Computes the magnitude from an interleaved complex array */
+    void _ConvertToPowerSpectrum();
+
+};
+
diff --git a/samples/SpeechRecognition/include/MathUtils.hpp b/samples/SpeechRecognition/include/MathUtils.hpp
new file mode 100644
index 0000000..5f81fb6
--- /dev/null
+++ b/samples/SpeechRecognition/include/MathUtils.hpp
@@ -0,0 +1,85 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <vector>
+#include <cmath>
+#include <cstdint>
+#include <numeric>
+
+class MathUtils
+{
+
+public:
+
+    /**
+     * @brief       Computes the FFT for the input vector
+     * @param[in]   input       Floating point vector of input elements
+     * @param[out]  fftOutput   Output buffer to be populated by computed
+     *                          FFTs
+     * @return      none
+     */
+    static void FftF32(std::vector<float>& input,
+                       std::vector<float>& fftOutput);
+
+
+    /**
+     * @brief       Computes the dot product of two 1D floating point
+     *              vectors.
+     *              result = sum(srcA[0]*srcB[0] + srcA[1]*srcB[1] + ..)
+     * @param[in]   srcPtrA     pointer to the first element of first
+     *                          array
+     * @param[in]   srcPtrB     pointer to the first element of second
+     *                          array
+     * @param[in]   srcLen      Number of elements in the array/vector
+     * @return      dot product
+     */
+    static float DotProductF32(float* srcPtrA, float* srcPtrB,
+                               const int srcLen);
+
+    /**
+     * @brief       Computes the squared magnitude of floating point
+     *              complex number array.
+     * @param[in]   ptrSrc      pointer to the first element of input
+     *                          array
+     * @param[in]   srcLen      Number of elements in the array/vector
+     * @param[out]  ptrDst      Output buffer to be populated
+     * @param[in]   dstLen      output buffer len (for sanity check only)
+     * @return      true if successful, false otherwise
+     */
+    static bool ComplexMagnitudeSquaredF32(float* ptrSrc,
+                                           const int srcLen,
+                                           float* ptrDst,
+                                           const int dstLen);
+
+    /**
+     * @brief       Computes the natural logarithms of input floating point
+     *              vector
+     * @param[in]   input   Floating point input vector
+     * @param[out]  output  Pre-allocated buffer to be populated with
+     *                      natural log values of each input element
+     * @return      none
+     */
+    static void VecLogarithmF32(std::vector <float>& input,
+                                std::vector <float>& output);
+
+    /**
+     * @brief       Gets the mean of a floating point array of elements
+     * @param[in]   ptrSrc  pointer to the first element
+     * @param[in]   srcLen  Number of elements in the array/vector
+     * @return      average value
+     */
+    static float MeanF32(float* ptrSrc, const uint32_t srcLen);
+
+    /**
+     * @brief       Gets the standard deviation of a floating point array
+     *              of elements
+     * @param[in]   ptrSrc  pointer to the first element
+     * @param[in]   srcLen  Number of elements in the array/vector
+     * @param[in]   mean    pre-computed mean value
+     * @return      standard deviation value
+     */
+    static float StdDevF32(float* ptrSrc, const uint32_t srcLen,
+                           const float mean);
+};
diff --git a/samples/SpeechRecognition/include/Preprocess.hpp b/samples/SpeechRecognition/include/Preprocess.hpp
new file mode 100644
index 0000000..80c5684
--- /dev/null
+++ b/samples/SpeechRecognition/include/Preprocess.hpp
@@ -0,0 +1,175 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "DataStructures.hpp"
+#include "SlidingWindow.hpp"
+#include <numeric>
+#include "MFCC.hpp"
+
+using AudioWindow = SlidingWindow<const float>;
+
+/* Class to facilitate pre-processing calculation for the Wav2Letter ASR model */
+class Preprocess
+{
+public:
+
+    MFCC                _m_mfcc;            /* MFCC instance */
+
+    /* Actual buffers to be populated */
+    Array2d<float>      _m_mfccBuf;         /* Contiguous buffer 1D: MFCC */
+    Array2d<float>      _m_delta1Buf;       /* Contiguous buffer 1D: Delta 1 */
+    Array2d<float>      _m_delta2Buf;       /* Contiguous buffer 1D: Delta 2 */
+
+    uint32_t            _m_windowLen;       /* Window length for MFCC */
+    uint32_t            _m_windowStride;    /* Window stride len for MFCC */
+    AudioWindow         _m_window;          /* Sliding window */
+
+    /**
+     * @brief       Constructor
+     * @param[in]   windowLen           number of elements in a window
+     * @param[in]   windowStride        stride (in number of elements) for
+     *                                  moving the window
+     * @param[in]   mfccInst            MFCC instance to use for feature extraction
+    */
+    Preprocess(
+            const uint32_t  windowLen,
+            const uint32_t  windowStride,
+            const MFCC mfccInst);
+    Preprocess() = delete;
+    ~Preprocess();
+
+    /**
+     * @brief       Calculates the features required from audio data. This
+     *              includes MFCC, first and second order deltas,
+     *              normalisation and finally, quantisation. The output buffer
+     *              is populated with the features of a given window placed
+     *              along a single row.
+     * @param[in]   audioData     pointer to the first element of audio data
+     * @param[in]   audioDataLen  number of elements in the audio data
+     * @param[out]  output        output buffer to be populated
+     * @param[in]   quantOffset   quantisation offset
+     * @param[in]   quantScale    quantisation scale
+     * @return      true if successful, false in case of error.
+     */
+    bool Invoke(const float* audioData,
+                const uint32_t  audioDataLen,
+                std::vector<int8_t>& output,
+                int quantOffset,
+                float quantScale);
+
+
+protected:
+    /**
+     * @brief Computes the first and second order deltas for the
+     *        MFCC buffers - they are assumed to be populated.
+     *
+     * @param[in]  mfcc   MFCC buffers
+     * @param[out] delta1 result of the first diff computation
+     * @param[out] delta2 result of the second diff computation
+     *
+     * @return true if successful, false otherwise
+     */
+    static bool _ComputeDeltas(Array2d<float>& mfcc,
+                               Array2d<float>& delta1,
+                               Array2d<float>& delta2);
+
+    /**
+     * @brief      Given a 2D vector of floats, computes the mean
+     * @param[in]   vec      vector of vector of floats
+     * @return      mean value
+     */
+    static float _GetMean(Array2d<float>& vec);
+
+    /**
+     * @brief       Given a 2D vector of floats, computes the stddev
+     * @param[in]   vec   vector of vector of floats
+     * @param[in]   mean     mean value of the vector passed in
+     * @return      stddev value
+     */
+    static float _GetStdDev(Array2d<float>& vec,
+                            const float mean);
+
+    /**
+     * @brief           Given a 2D vector of floats, normalises it using
+     *                  the mean and the stddev
+     * @param[in/out]   vec      vector of vector of floats
+     */
+    static void _NormaliseVec(Array2d<float>& vec);
+
+    /**
+     * @brief       Normalises the MFCC and delta buffers
+     */
+    void _Normalise();
+
+    /**
+     * @brief       Given the quantisation and data type limits, computes
+     *              the quantised values of a floating point input data.
+     * @param[in]   elem            Element to be quantised
+     * @param[in]   quantScale      Scale
+     * @param[in]   quantOffset     Offset
+     * @param[in]   minVal          Numerical limit - minimum
+     * @param[in]   maxVal          Numerical limit - maximum
+     * @return      floating point quantised value
+     */
+    static float _GetQuantElem(
+            const float     elem,
+            const float     quantScale,
+            const int       quantOffset,
+            const float     minVal,
+            const float     maxVal);
+
+    /**
+     * @brief       Quantises the MFCC and delta buffers, and places them
+     *              in the output buffer. While doing so, it transposes
+     *              the data. Reason: Buffers in this class are arranged
+     *              for "time" axis to be row major. Primary reason for
+     *              this being the convolution speed up (as we can use
+     *              contiguous memory). The output, however, requires the
+     *              time axis to be in column major arrangement.
+     * @param[out]  outputBuf       pointer to the output buffer
+     * @param[in]   quantOffset     quantisation offset
+     * @param[in]   quantScale      quantisation scale
+     */
+    template <typename T>
+    bool _Quantise(T* outputBuf, int quantOffset, float quantScale)
+    {
+        /* Populate */
+        T* outputBufMfcc = outputBuf;
+        T* outputBufD1 = outputBuf + this->_m_mfcc._m_params.m_numMfccFeatures;
+        T* outputBufD2 = outputBufD1 + this->_m_mfcc._m_params.m_numMfccFeatures;
+        const uint32_t ptrIncr = this->_m_mfcc._m_params.m_numMfccFeatures * 2; /* (3 vectors - 1 vector) */
+
+        const float minVal = std::numeric_limits<T>::min();
+        const float maxVal = std::numeric_limits<T>::max();
+
+        /* We need to do a transpose while copying and concatenating
+         * the tensor*/
+        for (uint32_t j = 0; j < this->_m_mfcc._m_params.m_numMfccVectors; ++j) {
+            for (uint32_t i = 0; i < this->_m_mfcc._m_params.m_numMfccFeatures; ++i)
+            {
+                *outputBufMfcc++ = static_cast<T>(this->_GetQuantElem(
+                        this->_m_mfccBuf(i, j), quantScale,
+                        quantOffset, minVal, maxVal));
+                *outputBufD1++ = static_cast<T>(this->_GetQuantElem(
+                        this->_m_delta1Buf(i, j), quantScale,
+                        quantOffset, minVal, maxVal));
+                *outputBufD2++ = static_cast<T>(this->_GetQuantElem(
+                        this->_m_delta2Buf(i, j), quantScale,
+                        quantOffset, minVal, maxVal));
+            }
+            outputBufMfcc += ptrIncr;
+            outputBufD1 += ptrIncr;
+            outputBufD2 += ptrIncr;
+        }
+
+        return true;
+    }
+};
+
diff --git a/samples/SpeechRecognition/include/SlidingWindow.hpp b/samples/SpeechRecognition/include/SlidingWindow.hpp
new file mode 100644
index 0000000..791a0b7
--- /dev/null
+++ b/samples/SpeechRecognition/include/SlidingWindow.hpp
@@ -0,0 +1,161 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+template<class T>
+class SlidingWindow
+{
+protected:
+    T* m_start = nullptr;
+    size_t m_dataSize = 0;
+    size_t m_size = 0;
+    size_t m_stride = 0;
+    size_t m_count = 0;
+public:
+
+    /**
+     * Creates the window slider through the given data.
+     *
+     * @param data          pointer to the data to slide through.
+     * @param dataSize      size in T type elements wise.
+     * @param windowSize    sliding window size in T type wise elements.
+     * @param stride        stride size in T type wise elements.
+     */
+    SlidingWindow(T* data, size_t dataSize,
+                  size_t windowSize, size_t stride)
+    {
+        m_start = data;
+        m_dataSize = dataSize;
+        m_size = windowSize;
+        m_stride = stride;
+    }
+
+    SlidingWindow() = default;
+
+    ~SlidingWindow() = default;
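+
+    /*
+     * Illustrative iteration sketch (sizes are placeholders):
+     *
+     *     std::vector<float> samples(16000, 0.0f);
+     *     SlidingWindow<const float> slider(samples.data(), samples.size(), 512, 160);
+     *     while (slider.HasNext())
+     *     {
+     *         const float* window = slider.Next();   // 512 elements, advancing by 160 each call
+     *     }
+     */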
+
+    /**
+     * Get the next data window.
+     * @return pointer to the next window, if next window is not available nullptr is returned.
+     */
+    virtual T* Next()
+    {
+        if (HasNext())
+        {
+            m_count++;
+            return m_start + Index() * m_stride;
+        }
+        else
+        {
+            return nullptr;
+        }
+    }
+
+    /**
+     * Checks if the next data portion is available.
+     * @return true if next data portion is available
+     */
+    bool HasNext()
+    {
+        return this->m_count < 1 + this->FractionalTotalStrides() && (this->NextWindowStartIndex() < this->m_dataSize);
+    }
+
+    /**
+     * Resets the slider to the initial position.
+     */
+    virtual void Reset()
+    {
+        m_count = 0;
+    }
+
+    /**
+     * Gets the sliding window size.
+     * @return window size in T type elements.
+     */
+    virtual size_t GetWindowSize()
+    {
+        return m_size;
+    }
+
+    /**
+     * Resets the slider to the start of the new data.
+     * New data size MUST be the same as the old one.
+     * @param newStart pointer to the new data to slide through.
+     */
+    virtual void Reset(T* newStart)
+    {
+        m_start = newStart;
+        Reset();
+    }
+
+    /**
+     * Gets current index of the sliding window.
+     * @return current position of the sliding window in number of strides
+     */
+    size_t Index()
+    {
+        return m_count == 0? 0: m_count - 1;
+    }
+
+    /**
+     * Gets the index from the start of the data where the next window will begin.
+     * While Index() returns the index of sliding window itself this function returns the index of the data
+     * element itself.
+     * @return Index from the start of the data where the next sliding window will begin.
+     */
+    virtual size_t NextWindowStartIndex()
+    {
+        return m_count == 0? 0: ((m_count) * m_stride);
+    }
+
+    /**
+     * Go to given sliding window index.
+     * @param index new position of the sliding window. if index is invalid (greater than possible range of strides)
+     *              then next call to Next() will return nullptr.
+     */
+    void FastForward(size_t index)
+    {
+        m_count = index;
+    }
+
+    /**
+     * Calculates whole number of times the window can stride through the given data.
+     * @return maximum number of strides.
+     */
+    size_t TotalStrides()
+    {
+        if (m_size > m_dataSize)
+        {
+            return 0;
+        }
+        return ((m_dataSize - m_size)/m_stride);
+    }
+
+    /**
+     * Calculates number of times the window can stride through the given data. May not be a whole number.
+     * @return Number of strides to cover all data.
+     */
+    float FractionalTotalStrides()
+    {
+        if(this->m_size > this->m_dataSize)
+        {
+            return this->m_dataSize / this->m_size;
+        }
+        else
+        {
+            return ((this->m_dataSize - this->m_size)/ static_cast<float>(this->m_stride));
+        }
+
+    }
+
+    /**
+     * Calculates the remaining data left to be processed
+     * @return The remaining unprocessed data
+     */
+    int RemainingData()
+    {
+        return this->m_dataSize - this->NextWindowStartIndex();
+    }
+};
\ No newline at end of file
diff --git a/samples/SpeechRecognition/include/SpeechRecognitionPipeline.hpp b/samples/SpeechRecognition/include/SpeechRecognitionPipeline.hpp
new file mode 100644
index 0000000..47ce304
--- /dev/null
+++ b/samples/SpeechRecognition/include/SpeechRecognitionPipeline.hpp
@@ -0,0 +1,139 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ArmnnNetworkExecutor.hpp"
+#include "Decoder.hpp"
+#include "MFCC.hpp"
+#include "Preprocess.hpp"
+
+namespace asr
+{
+/**
+ * Generic Speech Recognition pipeline with 3 steps: data pre-processing, inference execution and inference
+ * result post-processing.
+ *
+ */
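+/*
+ * Illustrative end-to-end sketch; it mirrors the flow in Main.cpp, with
+ * pipelineOptions, labels, preprocessor and audioBlock set up by the caller:
+ *
+ *     asr::IPipelinePtr pipeline = asr::CreatePipeline(pipelineOptions, labels);
+ *     std::vector<int8_t> input = pipeline->PreProcessing<float, int8_t>(audioBlock, preprocessor);
+ *     common::InferenceResults<int8_t> results;
+ *     pipeline->Inference<int8_t>(input, results);
+ *     pipeline->PostProcessing<int8_t>(results, isFirstWindow, isLastWindow, currentRContext);
+ */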
+class ASRPipeline
+{
+public:
+
+    /**
+     * Creates speech recognition pipeline with given network executor and decoder.
+     * @param executor - unique pointer to inference runner
+     * @param decoder - unique pointer to inference results decoder
+     */
+    ASRPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<int8_t>> executor,
+                std::unique_ptr<Decoder> decoder);
+
+    /**
+     * @brief Standard audio pre-processing implementation.
+     *
+     * Preprocesses and prepares the data for inference by
+     * extracting the MFCC features.
+     *
+     * @param[in] audio - the raw audio data
+     * @param[in/out] preprocessor - the preprocessor object, which handles the data preparation
+     */
+    template<typename Tin,typename Tout>
+    std::vector<Tout> PreProcessing(std::vector<Tin>& audio, Preprocess& preprocessor)
+    {
+        int audioDataToPreProcess = preprocessor._m_windowLen +
+                ((preprocessor._m_mfcc._m_params.m_numMfccVectors -1) *preprocessor._m_windowStride);
+        int outputBufferSize = preprocessor._m_mfcc._m_params.m_numMfccVectors
+                * preprocessor._m_mfcc._m_params.m_numMfccFeatures * 3;
+        std::vector<Tout> outputBuffer(outputBufferSize);
+        preprocessor.Invoke(audio.data(), audioDataToPreProcess, outputBuffer, m_executor->GetQuantizationOffset(),
+                            m_executor->GetQuantizationScale());
+        return outputBuffer;
+    }
+
+    /**
+     * @brief Executes inference
+     *
+     * Calls inference runner provided during instance construction.
+     *
+     * @param[in] preprocessedData - input inference data. Data type should be aligned with input tensor.
+     * @param[out] result - raw inference results.
+     */
+    template<typename T>
+    void Inference(const std::vector<T>& preprocessedData, common::InferenceResults<int8_t>& result)
+    {
+        size_t data_bytes = sizeof(T) * preprocessedData.size();
+        m_executor->Run(preprocessedData.data(), data_bytes, result);
+    }
+
+    /**
+     * @brief Standard inference results post-processing implementation.
+     *
+     * Decodes inference results using decoder provided during construction.
+     *
+     * @param[in] inferenceResult - inference results to be decoded.
+     * @param[in] isFirstWindow - for checking if this is the first window of the sliding window.
+     * @param[in] isLastWindow - for checking if this is the last window of the sliding window.
+     * @param[in] currentRContext - the right context of the output text. To be output if it is the last window.
+     */
+    template<typename T>
+    void PostProcessing(common::InferenceResults<int8_t>& inferenceResult,
+                        bool& isFirstWindow,
+                        bool isLastWindow,
+                        std::string currentRContext)
+    {
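+        // The indices below assume the Wav2Letter output used by this sample:
+        // 148 time steps per inference, each row holding 29 character scores.
+        // The first window emits rows [0, 99) (left + middle context), later
+        // windows emit only the middle context [49, 99), and the final window
+        // additionally emits the right context [100, 148).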
+        int rowLength = 29;
+        int middleContextStart = 49;
+        int middleContextEnd = 99;
+        int leftContextStart = 0;
+        int rightContextStart = 100;
+        int rightContextEnd = 148;
+
+        std::vector<T> contextToProcess;
+
+        // If isFirstWindow we keep the left context of the output
+        if(isFirstWindow)
+        {
+            std::vector<T> chunk(&inferenceResult[0][leftContextStart],
+                    &inferenceResult[0][middleContextEnd * rowLength]);
+            contextToProcess = chunk;
+        }
+        // Else we only keep the middle context of the output
+        else
+        {
+            std::vector<T> chunk(&inferenceResult[0][middleContextStart * rowLength],
+                    &inferenceResult[0][middleContextEnd * rowLength]);
+            contextToProcess = chunk;
+        }
+        std::string output = this->m_decoder->DecodeOutput<T>(contextToProcess);
+        isFirstWindow = false;
+        std::cout << output << std::flush;
+
+        // If this is the last window, we print the right context of the output
+        if(isLastWindow)
+        {
+            std::vector<T> rContext(&inferenceResult[0][rightContextStart*rowLength],
+                    &inferenceResult[0][rightContextEnd * rowLength]);
+            currentRContext = this->m_decoder->DecodeOutput(rContext);
+            std::cout << currentRContext << std::endl;
+        }
+    }
+
+protected:
+    std::unique_ptr<common::ArmnnNetworkExecutor<int8_t>> m_executor;
+    std::unique_ptr<Decoder> m_decoder;
+};
+
+using IPipelinePtr = std::unique_ptr<asr::ASRPipeline>;
+
+/**
+ * Constructs speech recognition pipeline based on configuration provided.
+ *
+ * @param[in] config - speech recognition pipeline configuration.
+ * @param[in] labels - asr labels
+ *
+ * @return unique pointer to asr pipeline.
+ */
+IPipelinePtr CreatePipeline(common::PipelineOptions& config, std::map<int, std::string>& labels);
+
+}// namespace asr
\ No newline at end of file
diff --git a/samples/SpeechRecognition/src/AudioCapture.cpp b/samples/SpeechRecognition/src/AudioCapture.cpp
new file mode 100644
index 0000000..f3b9092
--- /dev/null
+++ b/samples/SpeechRecognition/src/AudioCapture.cpp
@@ -0,0 +1,104 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "AudioCapture.hpp"
+#include <alsa/asoundlib.h>
+#include <sndfile.h>
+#include <samplerate.h>
+
+namespace asr
+{
+    std::vector<float> AudioCapture::LoadAudioFile(std::string filePath)
+    {
+        SF_INFO inputSoundFileInfo;
+        SNDFILE* infile = NULL;
+        infile = sf_open(filePath.c_str(), SFM_READ, &inputSoundFileInfo);
+
+        float audioIn[inputSoundFileInfo.channels * inputSoundFileInfo.frames];
+        sf_read_float(infile, audioIn, inputSoundFileInfo.channels * inputSoundFileInfo.frames);
+
+        float sampleRate = 16000.0f;
+        float srcRatio = sampleRate / (float)inputSoundFileInfo.samplerate;
+        int outputFrames = ceil(inputSoundFileInfo.frames * srcRatio);
+        float dataOut[outputFrames];
+
+        // Convert to mono
+        float monoData[inputSoundFileInfo.frames];
+        for(int i = 0; i < inputSoundFileInfo.frames; i++)
+        {
+            float val = 0.0f;
+            for(int j = 0; j < inputSoundFileInfo.channels; j++)
+            {
+                val += audioIn[i * inputSoundFileInfo.channels + j];
+            }
+            monoData[i] = val / inputSoundFileInfo.channels;
+        }
+
+        // Resample
+        SRC_DATA srcData;
+        srcData.data_in = monoData;
+        srcData.input_frames = inputSoundFileInfo.frames;
+        srcData.data_out = dataOut;
+        srcData.output_frames = outputFrames;
+        srcData.src_ratio = srcRatio;
+
+        src_simple(&srcData, SRC_SINC_BEST_QUALITY, 1);
+
+        // Convert to Vector
+        std::vector<float> processedInput;
+
+        for(int i = 0; i < srcData.output_frames_gen; ++i)
+        {
+            processedInput.push_back(srcData.data_out[i]);
+        }
+
+        sf_close(infile);
+
+        return processedInput;
+    }
+
+    void AudioCapture::InitSlidingWindow(float* data, size_t dataSize, int minSamples, size_t stride)
+    {
+        this->m_window = SlidingWindow<const float>(data, dataSize, minSamples, stride);
+    }
+
+    bool AudioCapture::HasNext()
+    {
+        return m_window.HasNext();
+    }
+
+    std::vector<float> AudioCapture::Next()
+    {
+        if (this->m_window.HasNext())
+        {
+            int remainingData = this->m_window.RemainingData();
+            const float* windowData = this->m_window.Next();
+
+            size_t windowSize = this->m_window.GetWindowSize();
+
+            if(remainingData < windowSize)
+            {
+                std::vector<float> mfccAudioData(windowSize, 0.0f);
+                for(int i = 0; i < remainingData; ++i)
+                {
+                    mfccAudioData[i] = *windowData;
+                    if(i < remainingData - 1)
+                    {
+                        ++windowData;
+                    }
+                }
+                return mfccAudioData;
+            }
+            else
+            {
+                std::vector<float> mfccAudioData(windowData,  windowData + windowSize);
+                return mfccAudioData;
+            }
+        }
+        else
+        {
+            throw std::out_of_range("Error, end of audio data reached.");
+        }
+    }
+} //namespace asr
+
diff --git a/samples/SpeechRecognition/src/Decoder.cpp b/samples/SpeechRecognition/src/Decoder.cpp
new file mode 100644
index 0000000..663d4db
--- /dev/null
+++ b/samples/SpeechRecognition/src/Decoder.cpp
@@ -0,0 +1,37 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Decoder.hpp"
+
+namespace asr {
+
+    Decoder::Decoder(std::map<int, std::string>& labels):
+            m_labels(labels)
+    {}
+
+    std::string Decoder::FilterCharacters(std::vector<char>& unfiltered)
+    {
+        std::string filtered = "";
+
+        for(int i = 0; i < unfiltered.size(); ++i)
+        {
+            if (unfiltered.at(i) == '$')
+            {
+                continue;
+            }
+
+            else if (i + 1 < unfiltered.size() && unfiltered.at(i) == unfiltered.at(i + 1))
+            {
+                continue;
+            }
+            else
+            {
+                filtered += unfiltered.at(i);
+            }
+        }
+        return filtered;
+    }
+}// namespace
+
diff --git a/samples/SpeechRecognition/src/MFCC.cpp b/samples/SpeechRecognition/src/MFCC.cpp
new file mode 100644
index 0000000..234b14d
--- /dev/null
+++ b/samples/SpeechRecognition/src/MFCC.cpp
@@ -0,0 +1,397 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <cstdio>
+#include <float.h>
+
+#include "MFCC.hpp"
+#include "MathUtils.hpp"
+
+
+MfccParams::MfccParams(
+        const float samplingFreq,
+        const int numFbankBins,
+        const float melLoFreq,
+        const float melHiFreq,
+        const int numMfccFeats,
+        const int frameLen,
+        const bool useHtkMethod,
+        const int numMfccVectors):
+        m_samplingFreq(samplingFreq),
+        m_numFbankBins(numFbankBins),
+        m_melLoFreq(melLoFreq),
+        m_melHiFreq(melHiFreq),
+        m_numMfccFeatures(numMfccFeats),
+        m_frameLen(frameLen),
+
+        /* Smallest power of 2 >= frame length. */
+        m_frameLenPadded(pow(2, ceil((log(frameLen)/log(2))))),
+        m_useHtkMethod(useHtkMethod),
+        m_numMfccVectors(numMfccVectors)
+{}
+
+std::string MfccParams::Str()
+{
+    char strC[1024];
+    snprintf(strC, sizeof(strC) - 1, "\n   \
+            \n\t Sampling frequency:         %f\
+            \n\t Number of filter banks:     %d\
+            \n\t Mel frequency limit (low):  %f\
+            \n\t Mel frequency limit (high): %f\
+            \n\t Number of MFCC features:    %d\
+            \n\t Frame length:               %d\
+            \n\t Padded frame length:        %d\
+            \n\t Using HTK for Mel scale:    %s\n",
+             this->m_samplingFreq, this->m_numFbankBins, this->m_melLoFreq,
+             this->m_melHiFreq, this->m_numMfccFeatures, this->m_frameLen,
+             this->m_frameLenPadded, this->m_useHtkMethod ? "yes" : "no");
+    return std::string{strC};
+}
+
+MFCC::MFCC(const MfccParams& params):
+        _m_params(params),
+        _m_filterBankInitialised(false)
+{
+    this->_m_buffer = std::vector<float>(
+            this->_m_params.m_frameLenPadded, 0.0);
+    this->_m_frame = std::vector<float>(
+            this->_m_params.m_frameLenPadded, 0.0);
+    this->_m_melEnergies = std::vector<float>(
+            this->_m_params.m_numFbankBins, 0.0);
+
+    this->_m_windowFunc = std::vector<float>(this->_m_params.m_frameLen);
+    const float multiplier = 2 * M_PI / this->_m_params.m_frameLen;
+
+    /* Create window function. */
+    for (size_t i = 0; i < this->_m_params.m_frameLen; i++)
+    {
+        this->_m_windowFunc[i] = (0.5 - (0.5 * cos(static_cast<float>(i) * multiplier)));
+    }
+}
+
+void MFCC::Init()
+{
+    this->_InitMelFilterBank();
+}
+
+float MFCC::MelScale(const float freq, const bool useHTKMethod)
+{
+    if (useHTKMethod)
+    {
+        return 1127.0f * logf (1.0f + freq / 700.0f);
+    }
+    else
+    {
+        /* Slaney formula for mel scale. */
+        float mel = freq / freqStep;
+
+        if (freq >= minLogHz)
+        {
+            mel = minLogMel + logf(freq / minLogHz) / logStep;
+        }
+        return mel;
+    }
+}
+
+float MFCC::InverseMelScale(const float melFreq, const bool useHTKMethod)
+{
+    if (useHTKMethod)
+    {
+        return 700.0f * (expf (melFreq / 1127.0f) - 1.0f);
+    }
+    else
+    {
+        /* Slaney formula for mel scale. */
+        float freq = freqStep * melFreq;
+
+        if (melFreq >= minLogMel)
+        {
+            freq = minLogHz * expf(logStep * (melFreq - minLogMel));
+        }
+        return freq;
+    }
+}
+
+
+bool MFCC::ApplyMelFilterBank(
+        std::vector<float>&                 fftVec,
+        std::vector<std::vector<float>>&    melFilterBank,
+        std::vector<int32_t>&               filterBankFilterFirst,
+        std::vector<int32_t>&               filterBankFilterLast,
+        std::vector<float>&                 melEnergies)
+{
+    const size_t numBanks = melEnergies.size();
+
+    if (numBanks != filterBankFilterFirst.size() ||
+        numBanks != filterBankFilterLast.size())
+    {
+        printf("unexpected filter bank lengths\n");
+        return false;
+    }
+
+    for (size_t bin = 0; bin < numBanks; ++bin)
+    {
+        auto filterBankIter = melFilterBank[bin].begin();
+        float melEnergy = 1e-10; /* Avoid log of zero at later stages */
+        const int32_t firstIndex = filterBankFilterFirst[bin];
+        const int32_t lastIndex = filterBankFilterLast[bin];
+
+        for (int32_t i = firstIndex; i <= lastIndex; ++i)
+        {
+            melEnergy += (*filterBankIter++ * fftVec[i]);
+        }
+
+        melEnergies[bin] = melEnergy;
+    }
+
+    return true;
+}
+
+void MFCC::ConvertToLogarithmicScale(std::vector<float>& melEnergies)
+{
+    float maxMelEnergy = -FLT_MAX;
+
+    /* Container for natural logarithms of mel energies */
+    std::vector <float> vecLogEnergies(melEnergies.size(), 0.f);
+
+    /* Because we are taking natural logs, we need to multiply by log10(e).
+     * Also, for wav2letter model, we scale our log10 values by 10 */
+    constexpr float multiplier = 10.0 * /* default scalar */
+                                 0.4342944819032518; /* log10f(std::exp(1.0))*/
+
+    /* Take log of the whole vector */
+    MathUtils::VecLogarithmF32(melEnergies, vecLogEnergies);
+
+    /* Scale the log values and get the max */
+    for (auto iterM = melEnergies.begin(), iterL = vecLogEnergies.begin();
+         iterM != melEnergies.end(); ++iterM, ++iterL)
+    {
+        *iterM = *iterL * multiplier;
+
+        /* Save the max mel energy. */
+        if (*iterM > maxMelEnergy)
+        {
+            maxMelEnergy = *iterM;
+        }
+    }
+
+    /* Clamp the mel energies */
+    constexpr float maxDb = 80.0;
+    const float clampLevelLowdB = maxMelEnergy - maxDb;
+    for (auto iter = melEnergies.begin(); iter != melEnergies.end(); ++iter)
+    {
+        *iter = std::max(*iter, clampLevelLowdB);
+    }
+}
+
+void MFCC::_ConvertToPowerSpectrum()
+{
+    const uint32_t halfDim = this->_m_params.m_frameLenPadded / 2;
+
+    /* Handle this special case. */
+    float firstEnergy = this->_m_buffer[0] * this->_m_buffer[0];
+    float lastEnergy = this->_m_buffer[1] * this->_m_buffer[1];
+
+    MathUtils::ComplexMagnitudeSquaredF32(
+            this->_m_buffer.data(),
+            this->_m_buffer.size(),
+            this->_m_buffer.data(),
+            this->_m_buffer.size()/2);
+
+    this->_m_buffer[0] = firstEnergy;
+    this->_m_buffer[halfDim] = lastEnergy;
+}
+
+std::vector<float> MFCC::CreateDCTMatrix(
+        const int32_t inputLength,
+        const int32_t coefficientCount)
+{
+    std::vector<float> dctMatrix(inputLength * coefficientCount);
+
+    /* Orthonormal normalization. */
+    const float normalizerK0 = 2 * sqrt(1.0 / static_cast<float>(4*inputLength));
+    const float normalizer = 2 * sqrt(1.0 / static_cast<float>(2*inputLength));
+
+    const float angleIncr = M_PI/inputLength;
+    float angle = angleIncr; /* we start using it at k = 1 loop */
+
+    /* First row of DCT will use normalizer K0 */
+    for (int32_t n = 0; n < inputLength; ++n)
+    {
+        dctMatrix[n] = normalizerK0;
+    }
+
+    /* Second row (index = 1) onwards, we use standard normalizer */
+    for (int32_t k = 1, m = inputLength; k < coefficientCount; ++k, m += inputLength)
+    {
+        for (int32_t n = 0; n < inputLength; ++n)
+        {
+            dctMatrix[m+n] = normalizer *
+                            cos((n + 0.5) * angle);
+        }
+        angle += angleIncr;
+    }
+    return dctMatrix;
+}
+
+float MFCC::GetMelFilterBankNormaliser(
+        const float&    leftMel,
+        const float&    rightMel,
+        const bool      useHTKMethod)
+{
+/* Slaney normalization for mel weights. */
+    return (2.0f / (MFCC::InverseMelScale(rightMel, useHTKMethod) -
+                    MFCC::InverseMelScale(leftMel, useHTKMethod)));
+}
+
+void MFCC::_InitMelFilterBank()
+{
+    if (!this->_IsMelFilterBankInited())
+    {
+        this->_m_melFilterBank = this->_CreateMelFilterBank();
+        this->_m_dctMatrix = this->CreateDCTMatrix(
+                this->_m_params.m_numFbankBins,
+                this->_m_params.m_numMfccFeatures);
+        this->_m_filterBankInitialised = true;
+    }
+}
+
+bool MFCC::_IsMelFilterBankInited()
+{
+    return this->_m_filterBankInitialised;
+}
+
+void MFCC::_MfccComputePreFeature(const std::vector<float>& audioData)
+{
+    this->_InitMelFilterBank();
+
+    /* TensorFlow way of normalizing .wav data to (-1, 1). */
+    constexpr float normaliser = 1.0;
+    for (size_t i = 0; i < this->_m_params.m_frameLen; i++)
+    {
+        this->_m_frame[i] = static_cast<float>(audioData[i]) * normaliser;
+    }
+
+    /* Apply window function to input frame. */
+    for(size_t i = 0; i < this->_m_params.m_frameLen; i++)
+    {
+        this->_m_frame[i] *= this->_m_windowFunc[i];
+    }
+
+    /* Set remaining frame values to 0. */
+    std::fill(this->_m_frame.begin() + this->_m_params.m_frameLen,this->_m_frame.end(), 0);
+
+    /* Compute FFT. */
+    MathUtils::FftF32(this->_m_frame, this->_m_buffer);
+
+    /* Convert to power spectrum. */
+    this->_ConvertToPowerSpectrum();
+
+    /* Apply mel filterbanks. */
+    if (!this->ApplyMelFilterBank(this->_m_buffer,
+                                  this->_m_melFilterBank,
+                                  this->_m_filterBankFilterFirst,
+                                  this->_m_filterBankFilterLast,
+                                  this->_m_melEnergies))
+    {
+        printf("Failed to apply MEL filter banks\n");
+    }
+
+    /* Convert to logarithmic scale */
+    this->ConvertToLogarithmicScale(this->_m_melEnergies);
+}
+
+std::vector<float> MFCC::MfccCompute(const std::vector<float>& audioData)
+{
+    this->_MfccComputePreFeature(audioData);
+
+    std::vector<float> mfccOut(this->_m_params.m_numMfccFeatures);
+
+    float * ptrMel = this->_m_melEnergies.data();
+    float * ptrDct = this->_m_dctMatrix.data();
+    float * ptrMfcc = mfccOut.data();
+
+    /* Take DCT. Uses matrix mul. */
+    for (size_t i = 0, j = 0; i < mfccOut.size();
+         ++i, j += this->_m_params.m_numFbankBins)
+    {
+        *ptrMfcc++ = MathUtils::DotProductF32(
+                ptrDct + j,
+                ptrMel,
+                this->_m_params.m_numFbankBins);
+    }
+
+    return mfccOut;
+}
+
+std::vector<std::vector<float>> MFCC::_CreateMelFilterBank()
+{
+    size_t numFftBins = this->_m_params.m_frameLenPadded / 2;
+    float fftBinWidth = static_cast<float>(this->_m_params.m_samplingFreq) / this->_m_params.m_frameLenPadded;
+
+    float melLowFreq = MFCC::MelScale(this->_m_params.m_melLoFreq,
+                                      this->_m_params.m_useHtkMethod);
+    float melHighFreq = MFCC::MelScale(this->_m_params.m_melHiFreq,
+                                       this->_m_params.m_useHtkMethod);
+    float melFreqDelta = (melHighFreq - melLowFreq) / (this->_m_params.m_numFbankBins + 1);
+
+    std::vector<float> thisBin = std::vector<float>(numFftBins);
+    std::vector<std::vector<float>> melFilterBank(
+            this->_m_params.m_numFbankBins);
+    this->_m_filterBankFilterFirst =
+            std::vector<int32_t>(this->_m_params.m_numFbankBins);
+    this->_m_filterBankFilterLast =
+            std::vector<int32_t>(this->_m_params.m_numFbankBins);
+
+    for (size_t bin = 0; bin < this->_m_params.m_numFbankBins; bin++)
+    {
+        float leftMel = melLowFreq + bin * melFreqDelta;
+        float centerMel = melLowFreq + (bin + 1) * melFreqDelta;
+        float rightMel = melLowFreq + (bin + 2) * melFreqDelta;
+
+        int32_t firstIndex = -1;
+        int32_t lastIndex = -1;
+        const float normaliser = this->GetMelFilterBankNormaliser(leftMel, rightMel, this->_m_params.m_useHtkMethod);
+
+        for (size_t i = 0; i < numFftBins; i++)
+        {
+            float freq = (fftBinWidth * i); /* Center freq of this fft bin. */
+            float mel = MFCC::MelScale(freq, this->_m_params.m_useHtkMethod);
+            thisBin[i] = 0.0;
+
+            if (mel > leftMel && mel < rightMel)
+            {
+                float weight;
+                if (mel <= centerMel)
+                {
+                    weight = (mel - leftMel) / (centerMel - leftMel);
+                }
+                else
+                {
+                    weight = (rightMel - mel) / (rightMel - centerMel);
+                }
+
+                thisBin[i] = weight * normaliser;
+                if (firstIndex == -1)
+                {
+                    firstIndex = i;
+                }
+                lastIndex = i;
+            }
+        }
+
+        this->_m_filterBankFilterFirst[bin] = firstIndex;
+        this->_m_filterBankFilterLast[bin] = lastIndex;
+
+        /* Copy the part we care about. */
+        for (int32_t i = firstIndex; i <= lastIndex; i++)
+        {
+            melFilterBank[bin].push_back(thisBin[i]);
+        }
+    }
+
+    return melFilterBank;
+}
+
diff --git a/samples/SpeechRecognition/src/Main.cpp b/samples/SpeechRecognition/src/Main.cpp
new file mode 100644
index 0000000..de37e23
--- /dev/null
+++ b/samples/SpeechRecognition/src/Main.cpp
@@ -0,0 +1,157 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <iostream>
+#include <map>
+#include <vector>
+#include <algorithm>
+#include <cmath>
+
+#include "CmdArgsParser.hpp"
+#include "ArmnnNetworkExecutor.hpp"
+#include "AudioCapture.hpp"
+#include "Preprocess.hpp"
+#include "Decoder.hpp"
+#include "SpeechRecognitionPipeline.hpp"
+
+
+using InferenceResult = std::vector<int8_t>;
+using InferenceResults = std::vector<InferenceResult>;
+
+const std::string AUDIO_FILE_PATH = "--audio-file-path";
+const std::string MODEL_FILE_PATH = "--model-file-path";
+const std::string LABEL_PATH = "--label-path";
+const std::string PREFERRED_BACKENDS = "--preferred-backends";
+const std::string HELP = "--help";
+
+std::map<int, std::string> labels = {
+        {0, "a" },
+        {1, "b" },
+        {2, "c" },
+        {3, "d" },
+        {4, "e" },
+        {5, "f" },
+        {6, "g" },
+        {7, "h" },
+        {8, "i" },
+        {9, "j" },
+        {10,"k" },
+        {11,"l" },
+        {12,"m" },
+        {13,"n" },
+        {14,"o" },
+        {15,"p" },
+        {16,"q" },
+        {17,"r" },
+        {18,"s" },
+        {19,"t" },
+        {20,"u" },
+        {21,"v" },
+        {22,"w" },
+        {23,"x" },
+        {24,"y" },
+        {25,"z" },
+        {26, "\'" },
+        {27, " "},
+        {28,"$" }
+};
+
+/*
+ * The accepted options for this Speech Recognition executable
+ */
+static std::map<std::string, std::string> CMD_OPTIONS = {
+        {AUDIO_FILE_PATH, "[REQUIRED] Path to the Audio file to run speech recognition on"},
+        {MODEL_FILE_PATH, "[REQUIRED] Path to the Speech Recognition model to use"},
+        {PREFERRED_BACKENDS, "[OPTIONAL] Takes the preferred backends in preference order, separated by comma."
+                             " For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]."
+                             " Defaults to CpuAcc,CpuRef"}
+};
+
+/*
+ * Reads the user supplied backend preference, splits it by comma, and returns an ordered vector
+ */
+std::vector<armnn::BackendId> GetPreferredBackendList(const std::string& preferredBackends)
+{
+    std::vector<armnn::BackendId> backends;
+    std::stringstream ss(preferredBackends);
+
+    while(ss.good())
+    {
+        std::string backend;
+        std::getline( ss, backend, ',' );
+        backends.emplace_back(backend);
+    }
+    return backends;
+}
+
+int main(int argc, char *argv[])
+{
+    // Wav2Letter ASR SETTINGS
+    int             SAMP_FREQ                  = 16000;
+    int             FRAME_LEN_MS               = 32;
+    int             FRAME_LEN_SAMPLES          = SAMP_FREQ * FRAME_LEN_MS * 0.001;
+    int             NUM_MFCC_FEATS             = 13;
+    int             MFCC_WINDOW_LEN            = 512;
+    int             MFCC_WINDOW_STRIDE         = 160;
+    const int       NUM_MFCC_VECTORS           = 296;
+    int             SAMPLES_PER_INFERENCE      = MFCC_WINDOW_LEN + ((NUM_MFCC_VECTORS -1) * MFCC_WINDOW_STRIDE);
+    int             MEL_LO_FREQ                = 0;
+    int             MEL_HI_FREQ                = 8000;
+    int             NUM_FBANK_BIN              = 128;
+    int             INPUT_WINDOW_LEFT_CONTEXT  = 98;
+    int             INPUT_WINDOW_RIGHT_CONTEXT = 98;
+    int             INPUT_WINDOW_INNER_CONTEXT = NUM_MFCC_VECTORS -
+            (INPUT_WINDOW_LEFT_CONTEXT + INPUT_WINDOW_RIGHT_CONTEXT);
+    int             SLIDING_WINDOW_OFFSET      = INPUT_WINDOW_INNER_CONTEXT * MFCC_WINDOW_STRIDE;
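+
+    // With the settings above, one inference consumes
+    // SAMPLES_PER_INFERENCE = 512 + (296 - 1) * 160 = 47712 samples (~2.98 s at 16 kHz),
+    // while consecutive inferences advance by
+    // SLIDING_WINDOW_OFFSET = (296 - (98 + 98)) * 160 = 16000 samples (1 s),
+    // so neighbouring windows overlap by the left and right context regions.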
+
+
+    MfccParams mfccParams(SAMP_FREQ, NUM_FBANK_BIN,
+            MEL_LO_FREQ, MEL_HI_FREQ, NUM_MFCC_FEATS, FRAME_LEN_SAMPLES, false, NUM_MFCC_VECTORS);
+
+    MFCC mfccInst = MFCC(mfccParams);
+
+    Preprocess preprocessor(MFCC_WINDOW_LEN, MFCC_WINDOW_STRIDE, mfccInst);
+
+    bool isFirstWindow = true;
+    std::string currentRContext  = "";
+
+    std::map <std::string, std::string> options;
+
+    int result = ParseOptions(options, CMD_OPTIONS, argv, argc);
+    if (result != 0)
+    {
+        return result;
+    }
+
+    // Create the network options
+    common::PipelineOptions pipelineOptions;
+    pipelineOptions.m_ModelFilePath = GetSpecifiedOption(options, MODEL_FILE_PATH);
+
+    if (CheckOptionSpecified(options, PREFERRED_BACKENDS))
+    {
+        pipelineOptions.m_backends = GetPreferredBackendList((GetSpecifiedOption(options, PREFERRED_BACKENDS)));
+    }
+    else
+    {
+        pipelineOptions.m_backends = {"CpuAcc", "CpuRef"};
+    }
+
+    asr::IPipelinePtr asrPipeline = asr::CreatePipeline(pipelineOptions, labels);
+
+    asr::AudioCapture capture;
+    std::vector<float> audioData = capture.LoadAudioFile(GetSpecifiedOption(options, AUDIO_FILE_PATH));
+    capture.InitSlidingWindow(audioData.data(), audioData.size(), SAMPLES_PER_INFERENCE, SLIDING_WINDOW_OFFSET);
+
+    while (capture.HasNext())
+    {
+        std::vector<float> audioBlock = capture.Next();
+        InferenceResults results;
+
+        std::vector<int8_t> preprocessedData = asrPipeline->PreProcessing<float, int8_t>(audioBlock, preprocessor);
+        asrPipeline->Inference<int8_t>(preprocessedData, results);
+        asrPipeline->PostProcessing<int8_t>(results, isFirstWindow, !capture.HasNext(), currentRContext);
+    }
+
+    return 0;
+}
\ No newline at end of file
diff --git a/samples/SpeechRecognition/src/MathUtils.cpp b/samples/SpeechRecognition/src/MathUtils.cpp
new file mode 100644
index 0000000..bf99083
--- /dev/null
+++ b/samples/SpeechRecognition/src/MathUtils.cpp
@@ -0,0 +1,112 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MathUtils.hpp"
+#include <vector>
+#include <cmath>
+#include <cstdio>
+
+void MathUtils::FftF32(std::vector<float>& input,
+                       std::vector<float>& fftOutput)
+{
+    const int inputLength = input.size();
+
+    for (int k = 0; k <= inputLength / 2; k++)
+    {
+        float sumReal = 0, sumImag = 0;
+
+        for (int t = 0; t < inputLength; t++)
+        {
+            float angle = 2 * M_PI * t * k / inputLength;
+            sumReal += input[t] * cosf(angle);
+            sumImag += -input[t] * sinf(angle);
+        }
+
+        /* Arrange output to [real0, realN/2, real1, im1, real2, im2, ...] */
+        if (k == 0)
+        {
+            fftOutput[0] = sumReal;
+        }
+        else if (k == inputLength / 2)
+        {
+            fftOutput[1] = sumReal;
+        }
+        else
+        {
+            fftOutput[k*2] = sumReal;
+            fftOutput[k*2 + 1] = sumImag;
+        }
+    }
+}
+
+float MathUtils::DotProductF32(float* srcPtrA, float* srcPtrB,
+                               const int srcLen)
+{
+    float output = 0.f;
+
+    for (int i = 0; i < srcLen; ++i)
+    {
+        output += *srcPtrA++ * *srcPtrB++;
+    }
+    return output;
+}
+
+bool MathUtils::ComplexMagnitudeSquaredF32(float* ptrSrc,
+                                           const int srcLen,
+                                           float* ptrDst,
+                                           const int dstLen)
+{
+    if (dstLen < srcLen/2)
+    {
+        printf("dstLen must be greater than srcLen/2");
+        return false;
+    }
+
+    for (int j = 0; j < srcLen; ++j)
+    {
+        const float real = *ptrSrc++;
+        const float im = *ptrSrc++;
+        *ptrDst++ = real*real + im*im;
+    }
+    return true;
+}
+
+void MathUtils::VecLogarithmF32(std::vector <float>& input,
+                                std::vector <float>& output)
+{
+    for (auto in = input.begin(), out = output.begin();
+         in != input.end(); ++in, ++out)
+    {
+        *out = logf(*in);
+    }
+}
+
+float MathUtils::MeanF32(float* ptrSrc, const uint32_t srcLen)
+{
+    if (!srcLen)
+    {
+        return 0.f;
+    }
+
+    float acc = std::accumulate(ptrSrc, ptrSrc + srcLen, 0.0);
+    return acc/srcLen;
+}
+
+float MathUtils::StdDevF32(float* ptrSrc, const uint32_t srcLen,
+                           const float mean)
+{
+    if (!srcLen)
+    {
+        return 0.f;
+    }
+    auto VarianceFunction = [=](float acc, const float value) {
+        return acc + (((value - mean) * (value - mean))/ srcLen);
+    };
+
+    float acc = std::accumulate(ptrSrc, ptrSrc + srcLen, 0.0,
+                                VarianceFunction);
+    return sqrtf(acc);
+}
+
diff --git a/samples/SpeechRecognition/src/Preprocess.cpp b/samples/SpeechRecognition/src/Preprocess.cpp
new file mode 100644
index 0000000..8627961
--- /dev/null
+++ b/samples/SpeechRecognition/src/Preprocess.cpp
@@ -0,0 +1,192 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <algorithm>
+#include <numeric>
+#include <math.h>
+#include <string.h>
+
+#include "MathUtils.hpp"
+#include "Preprocess.hpp"
+
+Preprocess::Preprocess(
+        const uint32_t  windowLen,
+        const uint32_t  windowStride,
+        const MFCC mfccInst):
+        _m_mfcc(mfccInst),
+        _m_mfccBuf(mfccInst._m_params.m_numMfccFeatures, mfccInst._m_params.m_numMfccVectors),
+        _m_delta1Buf(mfccInst._m_params.m_numMfccFeatures, mfccInst._m_params.m_numMfccVectors),
+        _m_delta2Buf(mfccInst._m_params.m_numMfccFeatures, mfccInst._m_params.m_numMfccVectors),
+        _m_windowLen(windowLen),
+        _m_windowStride(windowStride)
+{
+    if (mfccInst._m_params.m_numMfccFeatures > 0 && windowLen > 0)
+    {
+        this->_m_mfcc.Init();
+    }
+}
+
+Preprocess::~Preprocess()
+{
+}
+
+bool Preprocess::Invoke( const float*  audioData, const uint32_t  audioDataLen, std::vector<int8_t>& output,
+        int quantOffset, float quantScale)
+{
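+    // Slide an MFCC window over the audio block, pad to the expected number of feature vectors,
+    // compute first and second order deltas, normalise, then quantise the result to int8.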
+    this->_m_window = SlidingWindow<const float>(
+            audioData, audioDataLen,
+            this->_m_windowLen, this->_m_windowStride);
+
+    uint32_t mfccBufIdx = 0;
+
+    // Init buffers with 0
+    std::fill(_m_mfccBuf.begin(), _m_mfccBuf.end(), 0.f);
+    std::fill(_m_delta1Buf.begin(), _m_delta1Buf.end(), 0.f);
+    std::fill(_m_delta2Buf.begin(), _m_delta2Buf.end(), 0.f);
+
+    /* While we can slide over the window */
+    while (this->_m_window.HasNext())
+    {
+        const float*  mfccWindow = this->_m_window.Next();
+        auto mfccAudioData = std::vector<float>(
+                mfccWindow,
+                mfccWindow + this->_m_windowLen);
+
+        auto mfcc = this->_m_mfcc.MfccCompute(mfccAudioData);
+        for (size_t i = 0; i < this->_m_mfccBuf.size(0); ++i)
+        {
+            this->_m_mfccBuf(i, mfccBufIdx) = mfcc[i];
+        }
+        ++mfccBufIdx;
+    }
+
+    /* Pad MFCC if needed by repeating last feature vector */
+    while (mfccBufIdx != this->_m_mfcc._m_params.m_numMfccVectors)
+    {
+        memcpy(&this->_m_mfccBuf(0, mfccBufIdx),
+               &this->_m_mfccBuf(0, mfccBufIdx-1), sizeof(float)*this->_m_mfcc._m_params.m_numMfccFeatures);
+        ++mfccBufIdx;
+    }
+
+    /* Compute first and second order deltas from MFCCs */
+    this->_ComputeDeltas(this->_m_mfccBuf,
+                         this->_m_delta1Buf,
+                         this->_m_delta2Buf);
+
+    /* Normalise */
+    this->_Normalise();
+
+    return this->_Quantise<int8_t>(output.data(), quantOffset, quantScale);
+}
+
+bool Preprocess::_ComputeDeltas(Array2d<float>& mfcc,
+                                Array2d<float>& delta1,
+                                Array2d<float>& delta2)
+{
+    const std::vector <float> delta1Coeffs =
+            {6.66666667e-02,  5.00000000e-02,  3.33333333e-02,
+             1.66666667e-02, -3.46944695e-18, -1.66666667e-02,
+             -3.33333333e-02, -5.00000000e-02, -6.66666667e-02};
+
+    const std::vector <float> delta2Coeffs =
+            {0.06060606,      0.01515152,     -0.01731602,
+             -0.03679654,     -0.04329004,     -0.03679654,
+             -0.01731602,      0.01515152,      0.06060606};
+
+    if (delta1.size(0) == 0 || delta2.size(0) != delta1.size(0) ||
+        mfcc.size(0) == 0 || mfcc.size(1) == 0)
+    {
+        return false;
+    }
+
+    /* Get the middle index; coeff vec len should always be odd */
+    const size_t coeffLen = delta1Coeffs.size();
+    const size_t fMidIdx = (coeffLen - 1)/2;
+    const size_t numFeatures = mfcc.size(0);
+    const size_t numFeatVectors = mfcc.size(1);
+
+    /* iterate through features in MFCC vector*/
+    for (size_t i = 0; i < numFeatures; ++i)
+    {
+        /* for each feature, iterate through time (t) samples representing feature evolution and
+        * calculate d/dt and d^2/dt^2, using 1d convolution with differential kernels.
+        * Convolution padding = valid, result size is `time length - kernel length + 1`.
+        * The result is padded with 0 from both sides to match the size of initial time samples data.
+        *
+        * For the small filter, conv1d implementation as a simple loop is efficient enough.
+        * Filters of a greater size would need CMSIS-DSP functions to be used, like arm_fir_f32.
+        */
+
+        for (size_t j = fMidIdx; j < numFeatVectors - fMidIdx; ++j)
+        {
+            float d1 = 0;
+            float d2 = 0;
+            const size_t mfccStIdx = j - fMidIdx;
+
+            for (size_t k = 0, m = coeffLen - 1; k < coeffLen; ++k, --m)
+            {
+
+                d1 +=  mfcc(i,mfccStIdx + k) * delta1Coeffs[m];
+                d2 +=  mfcc(i,mfccStIdx + k) * delta2Coeffs[m];
+            }
+
+            delta1(i,j) = d1;
+            delta2(i,j) = d2;
+        }
+    }
+
+    return true;
+}
+
+float Preprocess::_GetMean(Array2d<float>& vec)
+{
+    return MathUtils::MeanF32(vec.begin(), vec.totalSize());
+}
+
+float Preprocess::_GetStdDev(Array2d<float>& vec, const float mean)
+{
+    return MathUtils::StdDevF32(vec.begin(), vec.totalSize(), mean);
+}
+
+void Preprocess::_NormaliseVec(Array2d<float>& vec)
+{
+    auto mean = Preprocess::_GetMean(vec);
+    auto stddev = Preprocess::_GetStdDev(vec, mean);
+
+    if (stddev == 0)
+    {
+        std::fill(vec.begin(), vec.end(), 0);
+    }
+    else
+    {
+        const float stddevInv = 1.f/stddev;
+        const float normalisedMean = mean/stddev;
+
+        auto NormalisingFunction = [=](float &value) {
+            value = value * stddevInv - normalisedMean;
+        };
+        std::for_each(vec.begin(), vec.end(), NormalisingFunction);
+    }
+}
+
+void Preprocess::_Normalise()
+{
+    Preprocess::_NormaliseVec(this->_m_mfccBuf);
+    Preprocess::_NormaliseVec(this->_m_delta1Buf);
+    Preprocess::_NormaliseVec(this->_m_delta2Buf);
+}
+
+float Preprocess::_GetQuantElem(
+        const float     elem,
+        const float     quantScale,
+        const int       quantOffset,
+        const float     minVal,
+        const float     maxVal)
+{
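+    // Affine-quantise a single element and clamp it to [minVal, maxVal].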
+    float val = std::round((elem/quantScale) + quantOffset);
+    return std::min<float>(std::max<float>(val, minVal), maxVal);
+}
\ No newline at end of file
diff --git a/samples/SpeechRecognition/src/SpeechRecognitionPipeline.cpp b/samples/SpeechRecognition/src/SpeechRecognitionPipeline.cpp
new file mode 100644
index 0000000..1b822d6
--- /dev/null
+++ b/samples/SpeechRecognition/src/SpeechRecognitionPipeline.cpp
@@ -0,0 +1,26 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SpeechRecognitionPipeline.hpp"
+#include "ArmnnNetworkExecutor.hpp"
+
+namespace asr
+{
+ASRPipeline::ASRPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<int8_t>> executor,
+                         std::unique_ptr<Decoder> decoder
+                         ) :
+        m_executor(std::move(executor)),
+        m_decoder(std::move(decoder)){}
+
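+// Builds the ASR pipeline: an int8 network executor for the given model plus an output decoder
+// that maps network output indices to characters via the supplied label map.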
+IPipelinePtr CreatePipeline(common::PipelineOptions& config, std::map<int, std::string>& labels)
+{
+    auto executor = std::make_unique<common::ArmnnNetworkExecutor<int8_t>>(config.m_ModelFilePath, config.m_backends);
+
+    auto decoder = std::make_unique<asr::Decoder>(labels);
+
+    return std::make_unique<asr::ASRPipeline>(std::move(executor), std::move(decoder));
+}
+
+}// namespace asr
\ No newline at end of file
diff --git a/samples/SpeechRecognition/test/AudioCaptureTest.cpp b/samples/SpeechRecognition/test/AudioCaptureTest.cpp
new file mode 100644
index 0000000..94b4e7c
--- /dev/null
+++ b/samples/SpeechRecognition/test/AudioCaptureTest.cpp
@@ -0,0 +1,61 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
+#include <limits>
+
+#include "AudioCapture.hpp"
+
+TEST_CASE("Test capture of audio file")
+{
+    std::string testResources = TEST_RESOURCE_DIR;
+    REQUIRE(testResources != "");
+    std::string file =  testResources + "/" + "myVoiceIsMyPassportVerifyMe04.wav";
+    asr::AudioCapture capture;
+    std::vector<float> audioData = capture.LoadAudioFile(file);
+    capture.InitSlidingWindow(audioData.data(), audioData.size(), 47712, 16000);
+
+    std::vector<float> firstAudioBlock = capture.Next();
+    float actual1 = firstAudioBlock.at(0);
+    float actual2 = firstAudioBlock.at(47000);
+    CHECK(std::to_string(actual1) == "0.000352");
+    CHECK(std::to_string(actual2) == "-0.056441");
+    CHECK(firstAudioBlock.size() == 47712);
+
+    CHECK(capture.HasNext() == true);
+
+    std::vector<float> secondAudioBlock = capture.Next();
+    float actual3 = secondAudioBlock.at(0);
+    float actual4 = secondAudioBlock.at(47000);
+    CHECK(std::to_string(actual3) == "0.102077");
+    CHECK(std::to_string(actual4) == "0.000194");
+    CHECK(capture.HasNext() == true);
+
+    std::vector<float> thirdAudioBlock = capture.Next();
+    float actual5 = thirdAudioBlock.at(0);
+    float actual6 = thirdAudioBlock.at(33500);
+    float actual7 = thirdAudioBlock.at(33600);
+    CHECK(std::to_string(actual5) == "-0.076416");
+    CHECK(std::to_string(actual6) == "-0.000275");
+    CHECK(std::to_string(actual7) == "0.000000");
+    CHECK(capture.HasNext() == false);
+}
+
+TEST_CASE("Test sliding window of audio capture")
+{
+    std::string testResources = TEST_RESOURCE_DIR;
+    REQUIRE(testResources != "");
+    std::string file =  testResources + "/" + "myVoiceIsMyPassportVerifyMe04.wav";
+    asr::AudioCapture capture;
+    std::vector<float> audioData = capture.LoadAudioFile(file);
+    capture.InitSlidingWindow(audioData.data(), audioData.size(), 47712, 16000);
+    capture.Next();
+    capture.Next();
+
+    CHECK(capture.HasNext() == true);
+    capture.Next();
+    CHECK(capture.HasNext() == false);
+}
diff --git a/samples/SpeechRecognition/test/DecoderTest.cpp b/samples/SpeechRecognition/test/DecoderTest.cpp
new file mode 100644
index 0000000..13a3905
--- /dev/null
+++ b/samples/SpeechRecognition/test/DecoderTest.cpp
@@ -0,0 +1,86 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <catch.hpp>
+#include <map>
+#include "Decoder.hpp"
+
+std::map<int, std::string> labels = {
+        {0, "a" },
+        {1, "b" },
+        {2, "c" },
+        {3, "d" },
+        {4, "e" },
+        {5, "f" },
+        {6, "g" },
+        {7, "h" },
+        {8, "i" },
+        {9, "j" },
+        {10,"k" },
+        {11,"l" },
+        {12,"m" },
+        {13,"n" },
+        {14,"o" },
+        {15,"p" },
+        {16,"q" },
+        {17,"r" },
+        {18,"s" },
+        {19,"t" },
+        {20,"u" },
+        {21,"v" },
+        {22,"w" },
+        {23,"x" },
+        {24,"y" },
+        {25,"z" },
+        {26, "\'" },
+        {27, " "},
+        {28,"$" }
+};
+
+TEST_CASE("Test Wav2Letter output decoder")
+{
+
+    std::vector<uint16_t> outputValues =
+            {
+            1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+            1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+            1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+            1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+
+            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+
+            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2
+            };
+
+    std::vector<int8_t> convertedValues;
+
+    for(uint16_t outputVal : outputValues)
+    {
+        convertedValues.emplace_back(static_cast<int8_t>(outputVal));
+    }
+
+    asr::Decoder decoder(labels);
+    std::string text = decoder.DecodeOutput<int8_t>(convertedValues);
+    CHECK(text == "hello");
+}
+
+
diff --git a/samples/SpeechRecognition/test/MFCCTest.cpp b/samples/SpeechRecognition/test/MFCCTest.cpp
new file mode 100644
index 0000000..2a55264
--- /dev/null
+++ b/samples/SpeechRecognition/test/MFCCTest.cpp
@@ -0,0 +1,102 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <catch.hpp>
+#include <limits>
+
+#include "MFCC.hpp"
+
+const std::vector<float> testWav = std::vector<float>{
+    -3.0f, 0.0f, 1.0f, -1.0f, 2.0f, 3.0f, -2.0f, 2.0f,
+            1.0f, -2.0f, 0.0f, 3.0f, -1.0f, 8.0f, 3.0f, 2.0f,
+            -1.0f, -1.0f, 2.0f, 7.0f, 3.0f, 5.0f, 6.0f, 6.0f,
+            6.0f, 12.0f, 5.0f, 6.0f, 3.0f, 3.0f, 5.0f, 4.0f,
+            4.0f, 6.0f, 7.0f, 7.0f, 7.0f, 3.0f, 7.0f, 2.0f,
+            8.0f, 4.0f, 4.0f, 2.0f, -4.0f, -1.0f, -1.0f, -4.0f,
+            2.0f, 1.0f, -1.0f, -4.0f, 0.0f, -7.0f, -6.0f, -2.0f,
+            -5.0f, 1.0f, -5.0f, -1.0f, -7.0f, -3.0f, -3.0f, -7.0f,
+            0.0f, -3.0f, 3.0f, -5.0f, 0.0f, 1.0f, -2.0f, -2.0f,
+            -3.0f, -3.0f, -7.0f, -3.0f, -2.0f, -6.0f, -5.0f, -8.0f,
+            -2.0f, -8.0f, 4.0f, -9.0f, -4.0f, -9.0f, -5.0f, -5.0f,
+            -3.0f, -9.0f, -3.0f, -9.0f, -1.0f, -7.0f, -4.0f, 1.0f,
+            -3.0f, 2.0f, -8.0f, -4.0f, -4.0f, -5.0f, 1.0f, -3.0f,
+            -1.0f, 0.0f, -1.0f, -2.0f, -3.0f, -2.0f, -4.0f, -1.0f,
+            1.0f, -1.0f, 3.0f, 0.0f, 3.0f, 2.0f, 0.0f, 0.0f,
+            0.0f, -3.0f, 1.0f, 1.0f, 0.0f, 8.0f, 3.0f, 4.0f,
+            1.0f, 5.0f, 6.0f, 4.0f, 7.0f, 3.0f, 3.0f, 0.0f,
+            3.0f, 6.0f, 7.0f, 6.0f, 4.0f, 5.0f, 9.0f, 9.0f,
+            5.0f, 5.0f, 8.0f, 1.0f, 6.0f, 9.0f, 6.0f, 6.0f,
+            7.0f, 1.0f, 8.0f, 1.0f, 5.0f, 0.0f, 5.0f, 5.0f,
+            0.0f, 3.0f, 2.0f, 7.0f, 2.0f, -3.0f, 3.0f, 0.0f,
+            3.0f, 0.0f, 0.0f, 0.0f, 2.0f, 0.0f, -1.0f, -1.0f,
+            -2.0f, -3.0f, -8.0f, 0.0f, 1.0f, 0.0f, -3.0f, -3.0f,
+            -3.0f, -2.0f, -3.0f, -3.0f, -4.0f, -6.0f, -2.0f, -8.0f,
+            -9.0f, -4.0f, -1.0f, -5.0f, -3.0f, -3.0f, -4.0f, -3.0f,
+            -6.0f, 3.0f, 0.0f, -1.0f, -2.0f, -9.0f, -4.0f, -2.0f,
+            2.0f, -1.0f, 3.0f, -5.0f, -5.0f, -2.0f, 0.0f, -2.0f,
+            0.0f, -1.0f, -3.0f, 1.0f, -2.0f, 9.0f, 4.0f, 5.0f,
+            2.0f, 2.0f, 1.0f, 0.0f, -6.0f, -2.0f, 0.0f, 0.0f,
+            0.0f, -1.0f, 4.0f, -4.0f, 3.0f, -7.0f, -1.0f, 5.0f,
+            -6.0f, -1.0f, -5.0f, 4.0f, 3.0f, 9.0f, -2.0f, 1.0f,
+            3.0f, 0.0f, 0.0f, -2.0f, 1.0f, 2.0f, 1.0f, 1.0f,
+            0.0f, 3.0f, 2.0f, -1.0f, 3.0f, -3.0f, 7.0f, 0.0f,
+            0.0f, 3.0f, 2.0f, 2.0f, -2.0f, 3.0f, -2.0f, 2.0f,
+            -3.0f, 4.0f, -1.0f, -1.0f, -5.0f, -1.0f, -3.0f, -2.0f,
+            1.0f, -1.0f, 3.0f, 2.0f, 4.0f, 1.0f, 2.0f, -2.0f,
+            0.0f, 2.0f, 7.0f, 0.0f, 8.0f, -3.0f, 6.0f, -3.0f,
+            6.0f, 1.0f, 2.0f, -3.0f, -1.0f, -1.0f, -1.0f, 1.0f,
+            -2.0f, 2.0f, 1.0f, 2.0f, 0.0f, -2.0f, 3.0f, -2.0f,
+            3.0f, -2.0f, 1.0f, 0.0f, -3.0f, -1.0f, -2.0f, -4.0f,
+            -6.0f, -5.0f, -8.0f, -1.0f, -4.0f, 0.0f, -3.0f, -1.0f,
+            -1.0f, -1.0f, 0.0f, -2.0f, -3.0f, -7.0f, -1.0f, 0.0f,
+            1.0f, 5.0f, 0.0f, 5.0f, 1.0f, 1.0f, -3.0f, 0.0f,
+            -6.0f, 3.0f, -8.0f, 4.0f, -8.0f, 6.0f, -6.0f, 1.0f,
+            -6.0f, -2.0f, -5.0f, -6.0f, 0.0f, -5.0f, 4.0f, -1.0f,
+            4.0f, -2.0f, 1.0f, 2.0f, 1.0f, 0.0f, -2.0f, 0.0f,
+            0.0f, 2.0f, -2.0f, 2.0f, -5.0f, 2.0f, 0.0f, -2.0f,
+            1.0f, -2.0f, 0.0f, 5.0f, 1.0f, 0.0f, 1.0f, 5.0f,
+            0.0f, 8.0f, 3.0f, 2.0f, 2.0f, 0.0f, 5.0f, -2.0f,
+            3.0f, 1.0f, 0.0f, 1.0f, 0.0f, -2.0f, -1.0f, -3.0f,
+            1.0f, -1.0f, 3.0f, 0.0f, 3.0f, 0.0f, -2.0f, -1.0f,
+            -4.0f, -4.0f, -4.0f, -1.0f, -4.0f, -4.0f, -3.0f, -6.0f,
+            -3.0f, -7.0f, -3.0f, -1.0f, -2.0f, 0.0f, -5.0f, -4.0f,
+            -7.0f, -3.0f, -2.0f, -2.0f, 1.0f, 2.0f, 2.0f, 8.0f,
+            5.0f, 4.0f, 2.0f, 4.0f, 3.0f, 5.0f, 0.0f, 3.0f,
+            3.0f, 6.0f, 4.0f, 2.0f, 2.0f, -2.0f, 4.0f, -2.0f,
+            3.0f, 3.0f, 2.0f, 1.0f, 1.0f, 4.0f, -5.0f, 2.0f,
+            -3.0f, 0.0f, -1.0f, 1.0f, -2.0f, 2.0f, 5.0f, 1.0f,
+            4.0f, 2.0f, 3.0f, 1.0f, -1.0f, 1.0f, 0.0f, 6.0f,
+            0.0f, -2.0f, -1.0f, 1.0f, -1.0f, 2.0f, -5.0f, -1.0f,
+            -5.0f, -1.0f, -6.0f, -3.0f, -3.0f, 2.0f, 4.0f, 0.0f,
+            -1.0f, -5.0f, 3.0f, -4.0f, -1.0f, -3.0f, -4.0f, 1.0f,
+            -4.0f, 1.0f, -1.0f, -1.0f, 0.0f, -5.0f, -4.0f, -2.0f,
+            -1.0f, -1.0f, -3.0f, -7.0f, -3.0f, -3.0f, 4.0f, 4.0f
+};
+
+TEST_CASE("Test MFCC")
+{
+    int sampFreq = 16000;
+    int frameLenMs = 32;
+    int frameLenSamples = sampFreq * frameLenMs * 0.001;
+    int numMfccFeats = 13;
+
+    std::vector<float> fullAudioData;
+
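+    // Scale the raw sample values to floats as if they were 16-bit PCM (divide by 2^15).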
+    for (auto f : testWav)
+    {
+        fullAudioData.emplace_back(f / (1 << 15));
+    }
+
+    MfccParams mfccParams(sampFreq, 128, 0, 8000, numMfccFeats, frameLenSamples, false, 1);
+
+    MFCC mfccInst = MFCC(mfccParams);
+    auto mfccOutput = mfccInst.MfccCompute(fullAudioData);
+
+    std::vector<float> expected = { -834.96564f, 21.02699f, 18.62856f, 7.3412f, 18.90791f, -5.36034f, 6.52351f,
+                                    -11.27064f, 8.37522f, 12.0672f, 8.30833f, -13.50008f, -18.1761f};
+
+    REQUIRE_THAT(mfccOutput, Catch::Approx(expected).epsilon(1.e-5) );
+}
\ No newline at end of file
diff --git a/samples/SpeechRecognition/test/PreprocessTest.cpp b/samples/SpeechRecognition/test/PreprocessTest.cpp
new file mode 100644
index 0000000..2b98831
--- /dev/null
+++ b/samples/SpeechRecognition/test/PreprocessTest.cpp
@@ -0,0 +1,136 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <catch.hpp>
+#include <limits>
+
+#include "Preprocess.hpp"
+#include "DataStructures.hpp"
+
+void PopulateTestWavVector(std::vector<int16_t>& vec)
+{
+    constexpr int int16max = std::numeric_limits<int16_t>::max();
+    int val = 0;
+    for (size_t i = 0; i < vec.size(); ++i, ++val)
+    {
+
+        /* We want a differential filter response from both - order 1
+         * and 2 => Don't have a linear signal here - we use a signal
+         * using squares for example. Alternate sign flips might work
+         * just as well and will be computationally less work! */
+        int valsq = val * val;
+        if (valsq > int16max)
+        {
+            val = 0;
+            valsq = 0;
+        }
+        vec[i] = valsq;
+    }
+}
+
+TEST_CASE("Preprocessing calculation INT8")
+{
+    /*Test  Constants: */
+    const uint32_t  windowLen             = 512;
+    const uint32_t  windowStride          = 160;
+    const float     quantScale            = 0.1410219967365265;
+    const int       quantOffset           = -11;
+    const int       numMfccVectors        = 10;
+    const int       sampFreq              = 16000;
+    const int       frameLenMs            = 32;
+    const int       frameLenSamples       = sampFreq * frameLenMs * 0.001;
+    const int       numMfccFeats          = 13;
+    const int       audioDataToPreProcess = 512 + ((numMfccVectors -1) * windowStride);
+    int             outputBufferSize = numMfccVectors * numMfccFeats * 3;
+
+    /* Test wav memory */
+    std::vector <int16_t> testWav1((windowStride * numMfccVectors) +
+                              (windowLen - windowStride));
+    /* Populate with dummy input */
+    PopulateTestWavVector(testWav1);
+
+    MfccParams mfccParams(sampFreq, 128, 0, 8000, numMfccFeats, frameLenSamples, false, numMfccVectors);
+
+    MFCC mfccInst = MFCC(mfccParams);
+
+    std::vector<float> fullAudioData;
+
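+    // Repeat the dummy waveform and scale it to float as 16-bit PCM samples.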
+    for(int i = 0; i < 4; ++i)
+    {
+        for (auto f : testWav1)
+        {
+            fullAudioData.emplace_back(static_cast<float>(f) / (1<<15));
+        }
+    }
+
+    Preprocess prep(frameLenSamples, windowStride, mfccInst);
+
+    std::vector<int8_t> outputBuffer(outputBufferSize);
+
+    prep.Invoke(fullAudioData.data(), audioDataToPreProcess, outputBuffer, quantOffset, quantScale);
+
+    int8_t expectedResult[numMfccVectors][numMfccFeats*3] =
+    {
+            /* Feature vec 0 */
+            -32, 4, -9, -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,    /* MFCCs   */
+            -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,    /* Delta 1 */
+            -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,    /* Delta 2 */
+
+            /* Feature vec 1 */
+            -31, 4, -9, -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+            -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+            -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+            /* Feature vec 2 */
+            -31, 4, -9, -9, -10, -10, -11, -11, -11, -11, -12, -12, -12,
+            -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+            -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+            /* Feature vec 3 */
+            -31, 4, -9, -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+            -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+            -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+            /* Feature vec 4 : this should have valid delta 1 and delta 2 */
+            -31, 4, -9, -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+            -38, -29, -9, 1, -2, -7, -8, -8, -12, -16, -14, -5, 5,
+            -68, -50, -13, 5, 0, -9, -9, -8, -13, -20, -19, -3, 15,
+
+            /* Feature vec 5 : this should have valid delta 1 and delta 2 */
+            -31, 4, -9, -8, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+            -62, -45, -11, 5, 0, -8, -9, -8, -12, -19, -17, -3, 13,
+            -27, -22, -13, -9, -11, -12, -12, -11, -11, -13, -13, -10, -6,
+
+            /* Feature vec 6 */
+            -31, 4, -9, -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+            -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+            -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+            /* Feature vec 7 */
+            -32, 4, -9, -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
+            -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+            -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+            /* Feature vec 8 */
+            -32, 4, -9, -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
+            -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+            -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+            /* Feature vec 9 */
+            -31, 4, -9, -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+            -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+            -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10
+    };
+
+    /* Check that the elements have been calculated correctly */
+    for (uint32_t j = 0; j < numMfccVectors; ++j)
+    {
+        for (uint32_t i = 0; i < numMfccFeats * 3; ++i)
+        {
+            size_t tensorIdx = (j * numMfccFeats * 3) + i;
+            CHECK(static_cast<int16_t>(outputBuffer.at(tensorIdx)) == static_cast<int16_t>(expectedResult[j][i]));
+        }
+    }
+}
diff --git a/samples/ObjectDetection/cmake/aarch64-toolchain.cmake b/samples/common/cmake/aarch64-toolchain.cmake
similarity index 100%
rename from samples/ObjectDetection/cmake/aarch64-toolchain.cmake
rename to samples/common/cmake/aarch64-toolchain.cmake
diff --git a/samples/ObjectDetection/cmake/arm-linux-gnueabihf-toolchain.cmake b/samples/common/cmake/arm-linux-gnueabihf-toolchain.cmake
similarity index 100%
rename from samples/ObjectDetection/cmake/arm-linux-gnueabihf-toolchain.cmake
rename to samples/common/cmake/arm-linux-gnueabihf-toolchain.cmake
diff --git a/samples/ObjectDetection/cmake/find_armnn.cmake b/samples/common/cmake/find_armnn.cmake
similarity index 100%
rename from samples/ObjectDetection/cmake/find_armnn.cmake
rename to samples/common/cmake/find_armnn.cmake
diff --git a/samples/ObjectDetection/cmake/find_catch.cmake b/samples/common/cmake/find_catch.cmake
similarity index 100%
rename from samples/ObjectDetection/cmake/find_catch.cmake
rename to samples/common/cmake/find_catch.cmake
diff --git a/samples/ObjectDetection/cmake/find_opencv.cmake b/samples/common/cmake/find_opencv.cmake
similarity index 100%
rename from samples/ObjectDetection/cmake/find_opencv.cmake
rename to samples/common/cmake/find_opencv.cmake
diff --git a/samples/common/include/ArmnnUtils/ArmnnNetworkExecutor.hpp b/samples/common/include/ArmnnUtils/ArmnnNetworkExecutor.hpp
new file mode 100644
index 0000000..96cc1d0
--- /dev/null
+++ b/samples/common/include/ArmnnUtils/ArmnnNetworkExecutor.hpp
@@ -0,0 +1,214 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "Types.hpp"
+
+#include "armnn/ArmNN.hpp"
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#include "armnnUtils/DataLayoutIndexed.hpp"
+#include <armnn/Logging.hpp>
+
+#include <cassert>
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace common
+{
+/**
+* @brief Used to load in a network through ArmNN and run inference on it against a given backend.
+*
+*/
+template <class Tout>
+class ArmnnNetworkExecutor
+{
+private:
+    armnn::IRuntimePtr m_Runtime;
+    armnn::NetworkId m_NetId{};
+    mutable InferenceResults<Tout> m_OutputBuffer;
+    armnn::InputTensors     m_InputTensors;
+    armnn::OutputTensors    m_OutputTensors;
+    std::vector<armnnTfLiteParser::BindingPointInfo> m_outputBindingInfo;
+
+    std::vector<std::string> m_outputLayerNamesList;
+
+    armnnTfLiteParser::BindingPointInfo m_inputBindingInfo;
+
+    void PrepareTensors(const void* inputData, const size_t dataBytes);
+
+    template <typename Enumeration>
+    auto log_as_int(Enumeration value)
+    -> typename std::underlying_type<Enumeration>::type
+    {
+        return static_cast<typename std::underlying_type<Enumeration>::type>(value);
+    }
+
+public:
+    ArmnnNetworkExecutor() = delete;
+
+    /**
+    * @brief Initializes the network: the model is parsed through the TfLite parser and optimized for the
+    *        preferred backends.
+    *
+    * Note that the order of the output layer names in m_outputLayerNamesList determines the order of the
+    * feature vectors returned by the Run method.
+    *
+    * @param[in] modelPath - Relative path to the model file
+    * @param[in] backends - The list of preferred backends to run inference on
+    */
+    ArmnnNetworkExecutor(std::string& modelPath,
+                         std::vector<armnn::BackendId>& backends);
+
+    /**
+    * @brief Returns the aspect ratio of the associated model in the order of width, height.
+    */
+    Size GetImageAspectRatio();
+
+    armnn::DataType GetInputDataType() const;
+
+    float GetQuantizationScale();
+
+    int GetQuantizationOffset();
+
+    /**
+    * @brief Runs inference on the provided input data, and stores the results in the provided InferenceResults object.
+    *
+    * @param[in] inputData - input frame data
+    * @param[in] dataBytes - input data size in bytes
+    * @param[out] outResults - InferenceResults object used to store the output results.
+    */
+    bool Run(const void* inputData, const size_t dataBytes, common::InferenceResults<Tout>& outResults);
+
+};
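+// A minimal usage sketch (model path, backend choice and input buffer are illustrative):
+//
+//     std::string modelPath = "model.tflite";
+//     std::vector<armnn::BackendId> backends = {"CpuAcc", "CpuRef"};
+//     common::ArmnnNetworkExecutor<int8_t> executor(modelPath, backends);
+//
+//     std::vector<int8_t> input = ...; // preprocessed data matching the model's input tensor
+//     common::InferenceResults<int8_t> results;
+//     executor.Run(input.data(), input.size() * sizeof(int8_t), results);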
+
+template <class Tout>
+ArmnnNetworkExecutor<Tout>::ArmnnNetworkExecutor(std::string& modelPath,
+                                           std::vector<armnn::BackendId>& preferredBackends)
+        : m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions()))
+{
+    // Import the TensorFlow lite model.
+    armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
+    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
+
+    std::vector<std::string> inputNames = parser->GetSubgraphInputTensorNames(0);
+
+    m_inputBindingInfo = parser->GetNetworkInputBindingInfo(0, inputNames[0]);
+
+    m_outputLayerNamesList = parser->GetSubgraphOutputTensorNames(0);
+
+    for (const std::string& name : m_outputLayerNamesList)
+    {
+        m_outputBindingInfo.push_back(std::move(parser->GetNetworkOutputBindingInfo(0, name)));
+    }
+    std::vector<std::string> errorMessages;
+    // optimize the network.
+    armnn::IOptimizedNetworkPtr optNet = Optimize(*network,
+                                                  preferredBackends,
+                                                  m_Runtime->GetDeviceSpec(),
+                                                  armnn::OptimizerOptions(),
+                                                  armnn::Optional<std::vector<std::string>&>(errorMessages));
+
+    if (!optNet)
+    {
+        const std::string errorMessage{"ArmnnNetworkExecutor: Failed to optimize network"};
+        ARMNN_LOG(error) << errorMessage;
+        throw armnn::Exception(errorMessage);
+    }
+
+    // Load the optimized network onto the m_Runtime device
+    std::string errorMessage;
+    if (armnn::Status::Success != m_Runtime->LoadNetwork(m_NetId, std::move(optNet), errorMessage))
+    {
+        ARMNN_LOG(error) << errorMessage;
+        throw armnn::Exception(errorMessage);
+    }
+
+    //pre-allocate memory for output (the size of it never changes)
+    for (size_t it = 0; it < m_outputLayerNamesList.size(); ++it)
+    {
+        const armnn::TensorShape& tensorShape = m_outputBindingInfo[it].second.GetShape();
+
+        std::vector<Tout> oneLayerOutResult;
+        oneLayerOutResult.resize(tensorShape.GetNumElements(), 0);
+        m_OutputBuffer.emplace_back(oneLayerOutResult);
+    }
+
+    // Make ArmNN output tensors once all output buffers are allocated, so the data
+    // pointers captured here stay valid.
+    m_OutputTensors.reserve(m_OutputBuffer.size());
+    for (size_t it = 0; it < m_OutputBuffer.size(); ++it)
+    {
+        m_OutputTensors.emplace_back(std::make_pair(
+                m_outputBindingInfo[it].first,
+                armnn::Tensor(m_outputBindingInfo[it].second,
+                              m_OutputBuffer.at(it).data())
+        ));
+    }
+
+}
+
+template <class Tout>
+armnn::DataType ArmnnNetworkExecutor<Tout>::GetInputDataType() const
+{
+    return m_inputBindingInfo.second.GetDataType();
+}
+
+template <class Tout>
+void ArmnnNetworkExecutor<Tout>::PrepareTensors(const void* inputData, const size_t dataBytes)
+{
+    assert(m_inputBindingInfo.second.GetNumBytes() >= dataBytes);
+    m_InputTensors.clear();
+    m_InputTensors = {{ m_inputBindingInfo.first, armnn::ConstTensor(m_inputBindingInfo.second, inputData)}};
+}
+
+template <class Tout>
+bool ArmnnNetworkExecutor<Tout>::Run(const void* inputData, const size_t dataBytes, InferenceResults<Tout>& outResults)
+{
+    /* Prepare tensors if they are not ready */
+    ARMNN_LOG(debug) << "Preparing tensors...";
+    this->PrepareTensors(inputData, dataBytes);
+    ARMNN_LOG(trace) << "Running inference...";
+
+    armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetId, m_InputTensors, m_OutputTensors);
+
+    std::stringstream inferenceFinished;
+    inferenceFinished << "Inference finished with code {" << log_as_int(ret) << "}\n";
+
+    ARMNN_LOG(trace) << inferenceFinished.str();
+
+    if (ret == armnn::Status::Failure)
+    {
+        ARMNN_LOG(error) << "Failed to perform inference.";
+    }
+
+    outResults = m_OutputBuffer;
+
+    return (armnn::Status::Success == ret);
+}
+
+template <class Tout>
+float ArmnnNetworkExecutor<Tout>::GetQuantizationScale()
+{
+    return this->m_inputBindingInfo.second.GetQuantizationScale();
+}
+
+template <class Tout>
+int ArmnnNetworkExecutor<Tout>::GetQuantizationOffset()
+{
+    return this->m_inputBindingInfo.second.GetQuantizationOffset();
+}
+
+template <class Tout>
+Size ArmnnNetworkExecutor<Tout>::GetImageAspectRatio()
+{
+    const auto shape = m_inputBindingInfo.second.GetShape();
+    assert(shape.GetNumDimensions() == 4);
+    armnnUtils::DataLayoutIndexed nhwc(armnn::DataLayout::NHWC);
+    return Size(shape[nhwc.GetWidthIndex()],
+                shape[nhwc.GetHeightIndex()]);
+}
+}// namespace common
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/CvVideoFileWriter.hpp b/samples/common/include/CVUtils/CvVideoFileWriter.hpp
similarity index 97%
rename from samples/ObjectDetection/include/CvVideoFileWriter.hpp
rename to samples/common/include/CVUtils/CvVideoFileWriter.hpp
index ea1501b..30348f0 100644
--- a/samples/ObjectDetection/include/CvVideoFileWriter.hpp
+++ b/samples/common/include/CVUtils/CvVideoFileWriter.hpp
@@ -8,7 +8,7 @@
 #include "IFrameOutput.hpp"
 #include <opencv2/opencv.hpp>
 
-namespace od
+namespace common
 {
 
 class CvVideoFileWriter : public IFrameOutput<cv::Mat> {
@@ -58,4 +58,4 @@
     cv::VideoWriter m_cvWriter{};
     bool m_ready = false;
 };
-}// namespace od
\ No newline at end of file
+}// namespace common
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/CvVideoFrameReader.hpp b/samples/common/include/CVUtils/CvVideoFrameReader.hpp
similarity index 93%
rename from samples/ObjectDetection/include/CvVideoFrameReader.hpp
rename to samples/common/include/CVUtils/CvVideoFrameReader.hpp
index 081f926..96d94f4 100644
--- a/samples/ObjectDetection/include/CvVideoFrameReader.hpp
+++ b/samples/common/include/CVUtils/CvVideoFrameReader.hpp
@@ -8,7 +8,7 @@
 #include "IFrameReader.hpp"
 #include <opencv2/opencv.hpp>
 
-namespace od
+namespace common
 {
 
 class CvVideoFrameReader :
@@ -95,14 +95,14 @@
     CvVideoFrameReaderRgbWrapper(const CvVideoFrameReaderRgbWrapper& o) = delete;
     CvVideoFrameReaderRgbWrapper(CvVideoFrameReaderRgbWrapper&& o) = delete;
 
-    CvVideoFrameReaderRgbWrapper(std::unique_ptr<od::CvVideoFrameReader> reader);
+    CvVideoFrameReaderRgbWrapper(std::unique_ptr<common::CvVideoFrameReader> reader);
 
     std::shared_ptr<cv::Mat> ReadFrame() override;
 
     bool IsExhausted(const std::shared_ptr<cv::Mat>& frame) const override;
 
 private:
-    std::unique_ptr<od::CvVideoFrameReader> m_reader;
+    std::unique_ptr<common::CvVideoFrameReader> m_reader;
 };
 
-}// namespace od
\ No newline at end of file
+}// namespace common
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/CvWindowOutput.hpp b/samples/common/include/CVUtils/CvWindowOutput.hpp
similarity index 95%
rename from samples/ObjectDetection/include/CvWindowOutput.hpp
rename to samples/common/include/CVUtils/CvWindowOutput.hpp
index 317327b..4b9ae3b 100644
--- a/samples/ObjectDetection/include/CvWindowOutput.hpp
+++ b/samples/common/include/CVUtils/CvWindowOutput.hpp
@@ -8,7 +8,7 @@
 #include "IFrameOutput.hpp"
 #include <opencv2/opencv.hpp>
 
-namespace od
+namespace common
 {
 
 class CvWindowOutput : public IFrameOutput<cv::Mat> {
@@ -50,4 +50,4 @@
     std::string m_windowName;
 
 };
-}// namespace od
\ No newline at end of file
+}// namespace common
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/IFrameOutput.hpp b/samples/common/include/CVUtils/IFrameOutput.hpp
similarity index 95%
rename from samples/ObjectDetection/include/IFrameOutput.hpp
rename to samples/common/include/CVUtils/IFrameOutput.hpp
index c8b4fe5..6f7ca0b 100644
--- a/samples/ObjectDetection/include/IFrameOutput.hpp
+++ b/samples/common/include/CVUtils/IFrameOutput.hpp
@@ -8,7 +8,7 @@
 #include <cstddef>
 #include <memory>
 
-namespace od
+namespace common
 {
 /**
  * @brief Frames output interface
@@ -45,4 +45,4 @@
 
     };
 
-}// namespace od
+}// namespace common
diff --git a/samples/ObjectDetection/include/IFrameReader.hpp b/samples/common/include/CVUtils/IFrameReader.hpp
similarity index 96%
rename from samples/ObjectDetection/include/IFrameReader.hpp
rename to samples/common/include/CVUtils/IFrameReader.hpp
index d371b7d..e171b3b 100644
--- a/samples/ObjectDetection/include/IFrameReader.hpp
+++ b/samples/common/include/CVUtils/IFrameReader.hpp
@@ -8,7 +8,7 @@
 #include <cstddef>
 #include <memory>
 
-namespace od
+namespace common
 {
 /**
  * @brief Frame source reader interface
@@ -42,4 +42,4 @@
 
 };
 
-}// namespace od
\ No newline at end of file
+}// namespace common
\ No newline at end of file
diff --git a/samples/common/include/Utils/CmdArgsParser.hpp b/samples/common/include/Utils/CmdArgsParser.hpp
new file mode 100644
index 0000000..710a33d
--- /dev/null
+++ b/samples/common/include/Utils/CmdArgsParser.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+#include <string>
+#include <map>
+
+/*
+ * Checks that a particular option was specified by the user
+ */
+bool CheckOptionSpecified(const std::map<std::string, std::string>& options, const std::string& option);
+
+
+/*
+ * Retrieves the user provided option
+ */
+std::string GetSpecifiedOption(const std::map<std::string, std::string>& options, const std::string& option);
+
+
+/*
+ * Parses all the command line options provided by the user and stores in a map.
+ */
+int ParseOptions(std::map<std::string, std::string>& options, std::map<std::string, std::string>& acceptedOptions,
+                 char *argv[], int argc);
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/Types.hpp b/samples/common/include/Utils/Types.hpp
similarity index 78%
rename from samples/ObjectDetection/include/Types.hpp
rename to samples/common/include/Utils/Types.hpp
index 801cff3..4d1f708 100644
--- a/samples/ObjectDetection/include/Types.hpp
+++ b/samples/common/include/Utils/Types.hpp
@@ -9,9 +9,10 @@
 #include <cstdint>
 #include <vector>
 #include <tuple>
+
 #include <armnn/BackendId.hpp>
 
-namespace od
+namespace common
 {
 
 struct Size
@@ -38,13 +39,16 @@
     std::tuple<int, int, int> colorCode;
 };
 
-struct ODPipelineOptions
+struct PipelineOptions
 {
     std::string m_ModelName;
     std::string m_ModelFilePath;
     std::vector<armnn::BackendId> m_backends;
 };
 
-using InferenceResult = std::vector<float>;
-using InferenceResults = std::vector<InferenceResult>;
-}
\ No newline at end of file
+template<typename T>
+using InferenceResult = std::vector<T>;
+
+template<typename T>
+using InferenceResults = std::vector<InferenceResult<T>>;
+} // namespace common
\ No newline at end of file
diff --git a/samples/ObjectDetection/src/CvVideoFileWriter.cpp b/samples/common/src/CVUtils/CvVideoFileWriter.cpp
similarity index 95%
rename from samples/ObjectDetection/src/CvVideoFileWriter.cpp
rename to samples/common/src/CVUtils/CvVideoFileWriter.cpp
index ab80b95..b766300 100644
--- a/samples/ObjectDetection/src/CvVideoFileWriter.cpp
+++ b/samples/common/src/CVUtils/CvVideoFileWriter.cpp
@@ -5,7 +5,7 @@
 
 #include "CvVideoFileWriter.hpp"
 
-namespace od
+namespace common
 {
 
 void CvVideoFileWriter::Init(const std::string& outputVideo, int encoding, double fps, int width, int height)
@@ -35,4 +35,4 @@
 {
     m_cvWriter.release();
 }
-}// namespace od
+}// namespace common
diff --git a/samples/ObjectDetection/src/CvVideoFrameReader.cpp b/samples/common/src/CVUtils/CvVideoFrameReader.cpp
similarity index 96%
rename from samples/ObjectDetection/src/CvVideoFrameReader.cpp
rename to samples/common/src/CVUtils/CvVideoFrameReader.cpp
index 09b5050..2bd92d2 100644
--- a/samples/ObjectDetection/src/CvVideoFrameReader.cpp
+++ b/samples/common/src/CVUtils/CvVideoFrameReader.cpp
@@ -6,7 +6,7 @@
 
 #include "CvVideoFrameReader.hpp"
 
-namespace od
+namespace common
 {
 
 std::shared_ptr<cv::Mat> CvVideoFrameReader::ReadFrame()
@@ -91,8 +91,8 @@
     return m_reader->IsExhausted(frame);
 }
 
-CvVideoFrameReaderRgbWrapper::CvVideoFrameReaderRgbWrapper(std::unique_ptr<od::CvVideoFrameReader> reader):
+CvVideoFrameReaderRgbWrapper::CvVideoFrameReaderRgbWrapper(std::unique_ptr<common::CvVideoFrameReader> reader):
         m_reader(std::move(reader))
 {}
 
-}// namespace od
\ No newline at end of file
+}// namespace common
\ No newline at end of file
diff --git a/samples/ObjectDetection/src/CvWindowOutput.cpp b/samples/common/src/CVUtils/CvWindowOutput.cpp
similarity index 93%
rename from samples/ObjectDetection/src/CvWindowOutput.cpp
rename to samples/common/src/CVUtils/CvWindowOutput.cpp
index a32147b..190a760 100644
--- a/samples/ObjectDetection/src/CvWindowOutput.cpp
+++ b/samples/common/src/CVUtils/CvWindowOutput.cpp
@@ -5,7 +5,7 @@
 
 #include "CvWindowOutput.hpp"
 
-namespace od
+namespace common
 {
 
 void CvWindowOutput::Init(const std::string& windowName)
@@ -30,4 +30,4 @@
 {
     return true;
 }
-}// namespace od
\ No newline at end of file
+}// namespace common
\ No newline at end of file
diff --git a/samples/ObjectDetection/src/CmdArgsParser.cpp b/samples/common/src/Utils/CmdArgsParser.cpp
similarity index 97%
rename from samples/ObjectDetection/src/CmdArgsParser.cpp
rename to samples/common/src/Utils/CmdArgsParser.cpp
index b8c74bc..1f09826 100644
--- a/samples/ObjectDetection/src/CmdArgsParser.cpp
+++ b/samples/common/src/Utils/CmdArgsParser.cpp
@@ -45,7 +45,7 @@
                 std::string value = argv[++i];
                 options.insert({it->first, value});
             }
-            else if (std::string(argv[i]) == HELP)
+            else if (std::string(argv[i]) == "HELP")
             {
                 std::cout << "Available options" << std::endl;
                 for (auto & acceptedOption : acceptedOptions)