MLECO-1252 ASR sample application using the public ArmNN C++ API.

Change-Id: I98cd505b8772a8c8fa88308121bc94135bb45068
Signed-off-by: Éanna Ó Catháin <eanna.ocathain@arm.com>
diff --git a/samples/ObjectDetection/CMakeLists.txt b/samples/ObjectDetection/CMakeLists.txt
index 9e85fab..7e587f7 100644
--- a/samples/ObjectDetection/CMakeLists.txt
+++ b/samples/ObjectDetection/CMakeLists.txt
@@ -38,12 +38,16 @@
     set(DEPENDENCIES_DIR ${CMAKE_BINARY_DIR}/dependencies)
 endif()
 
-include(cmake/find_opencv.cmake)
-include(cmake/find_armnn.cmake)
+include(../common/cmake/find_opencv.cmake)
+include(../common/cmake/find_armnn.cmake)
 
 include_directories(include)
+include_directories(../common/include/ArmnnUtils)
+include_directories(../common/include/Utils)
+include_directories(../common/include/CVUtils)
 
 file(GLOB SOURCES "src/*.cpp")
+file(GLOB COMMON_SOURCES "../common/src/**/*.cpp")
 list(REMOVE_ITEM SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/Main.cpp)
 file(GLOB TEST_SOURCES "test/*.cpp")
 file(GLOB APP_MAIN "src/Main.cpp")
@@ -55,7 +59,7 @@
 
 set(APP_TARGET_NAME "${CMAKE_PROJECT_NAME}")
 
-add_executable("${APP_TARGET_NAME}" ${SOURCES} ${APP_MAIN})
+add_executable("${APP_TARGET_NAME}" ${SOURCES} ${COMMON_SOURCES} ${APP_MAIN})
 
 if (NOT OPENCV_LIBS_FOUND)
     message("Building OpenCV libs")
diff --git a/samples/ObjectDetection/Readme.md b/samples/ObjectDetection/Readme.md
index bceaa4b..408917e 100644
--- a/samples/ObjectDetection/Readme.md
+++ b/samples/ObjectDetection/Readme.md
@@ -230,7 +230,6 @@
 * --preferred-backends: Takes the preferred backends in preference order, separated by comma.
                         For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc].
                         Defaults to CpuRef **[OPTIONAL]**
-* --help: Prints all the available options to screen
 
 ### Object Detection on a supplied video file
 
@@ -379,8 +378,8 @@
 Generic object detection pipeline has 3 steps to perform data pre-processing, run inference and decode inference results
 in the post-processing step.
 
-See [`ObjDetectionPipeline`](./include/NetworkPipeline.hpp) and implementations for [`MobileNetSSDv1`](./include/NetworkPipeline.hpp)
-and [`YoloV3Tiny`](./include/NetworkPipeline.hpp) for more details.
+See [`ObjDetectionPipeline`](include/ObjectDetectionPipeline.hpp) and implementations for [`MobileNetSSDv1`](include/ObjectDetectionPipeline.hpp)
+and [`YoloV3Tiny`](include/ObjectDetectionPipeline.hpp) for more details.
 
 #### Pre-processing the Captured Frame
 Each frame captured from source is read as an `cv::Mat` in BGR format but channels are swapped to RGB in a frame reader
diff --git a/samples/ObjectDetection/cmake/aarch64-toolchain.cmake b/samples/ObjectDetection/cmake/aarch64-toolchain.cmake
deleted file mode 100644
index bdd02f8..0000000
--- a/samples/ObjectDetection/cmake/aarch64-toolchain.cmake
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-# SPDX-License-Identifier: MIT
-
-# specify the cross compiler
-set(GNU_MACHINE "aarch64-linux-gnu")
-set(CROSS_PREFIX "aarch64-linux-gnu-")
-
-set(CMAKE_C_COMPILER   ${CROSS_PREFIX}gcc)
-set(CMAKE_CXX_COMPILER ${CROSS_PREFIX}g++)
-set(CMAKE_AR           ${CROSS_PREFIX}ar)
-set(CMAKE_STRIP        ${CROSS_PREFIX}strip)
-set(CMAKE_LINKER       ${CROSS_PREFIX}ld)
-
-set(CMAKE_CROSSCOMPILING true)
-set(CMAKE_SYSTEM_NAME Linux)
-
-set(CMAKE_SYSTEM_PROCESSOR aarch64)
-
-set(OPENCV_EXTRA_ARGS   "-DENABLE_NEON=ON"
-                        "-DCMAKE_TOOLCHAIN_FILE=platforms/linux/aarch64-gnu.toolchain.cmake")
\ No newline at end of file
diff --git a/samples/ObjectDetection/cmake/arm-linux-gnueabihf-toolchain.cmake b/samples/ObjectDetection/cmake/arm-linux-gnueabihf-toolchain.cmake
deleted file mode 100644
index f66b964..0000000
--- a/samples/ObjectDetection/cmake/arm-linux-gnueabihf-toolchain.cmake
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-# SPDX-License-Identifier: MIT
-
-# specify the cross compiler
-set(GNU_MACHINE "arm-linux-gnueabihf")
-set(CROSS_PREFIX "arm-linux-gnueabihf-")
-
-set(CMAKE_C_COMPILER   ${CROSS_PREFIX}gcc)
-set(CMAKE_CXX_COMPILER ${CROSS_PREFIX}g++)
-set(CMAKE_AR           ${CROSS_PREFIX}ar)
-set(CMAKE_STRIP        ${CROSS_PREFIX}strip)
-set(CMAKE_LINKER       ${CROSS_PREFIX}ld)
-
-set(CMAKE_CROSSCOMPILING true)
-set(CMAKE_SYSTEM_NAME Linux)
-
-set(CMAKE_SYSTEM_PROCESSOR arm)
-
-set(OPENCV_EXTRA_ARGS   "-DENABLE_NEON=ON"
-                        "-DCMAKE_TOOLCHAIN_FILE=platforms/linux/arm.toolchain.cmake")
\ No newline at end of file
diff --git a/samples/ObjectDetection/cmake/find_armnn.cmake b/samples/ObjectDetection/cmake/find_armnn.cmake
deleted file mode 100644
index 289e912..0000000
--- a/samples/ObjectDetection/cmake/find_armnn.cmake
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-# SPDX-License-Identifier: MIT
-# Search for ArmNN built libraries in user-provided path first, then current repository, then system
-
-set(ARMNN_LIB_NAMES "libarmnn.so"
-    "libarmnnTfLiteParser.so")
-
-set(ARMNN_LIBS "")
-
-get_filename_component(PARENT_DIR ${PROJECT_SOURCE_DIR} DIRECTORY)
-get_filename_component(REPO_DIR ${PARENT_DIR} DIRECTORY)
-
-foreach(armnn_lib ${ARMNN_LIB_NAMES})
-    find_library(ARMNN_${armnn_lib}
-        NAMES
-            ${armnn_lib}
-        HINTS
-            ${ARMNN_LIB_DIR} ${REPO_DIR}
-        PATHS
-            ${ARMNN_LIB_DIR} ${REPO_DIR}
-        PATH_SUFFIXES
-            "lib"
-            "lib64")
-    if(ARMNN_${armnn_lib})
-        message("Found library ${ARMNN_${armnn_lib}}")
-        list(APPEND ARMNN_LIBS ${ARMNN_${armnn_lib}})
-        get_filename_component(LIB_DIR ${ARMNN_${armnn_lib}} DIRECTORY)
-        get_filename_component(LIB_PARENT_DIR ${LIB_DIR} DIRECTORY)
-        set(ARMNN_INCLUDE_DIR ${LIB_PARENT_DIR}/include)
-    endif()
-endforeach()
-
-if(NOT ARMNN_LIBS)
-    message(FATAL_ERROR "Could not find ArmNN libraries ${ARMNN_LIB_NAMES}")
-endif()
diff --git a/samples/ObjectDetection/cmake/find_catch.cmake b/samples/ObjectDetection/cmake/find_catch.cmake
deleted file mode 100644
index 584b807..0000000
--- a/samples/ObjectDetection/cmake/find_catch.cmake
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-# SPDX-License-Identifier: MIT
-
-#Test TPIP
-set(TEST_TPIP ${DEPENDENCIES_DIR}/test)
-file(MAKE_DIRECTORY ${TEST_TPIP})
-set(TEST_TPIP_INCLUDE ${TEST_TPIP}/include)
-file(MAKE_DIRECTORY ${TEST_TPIP_INCLUDE})
-
-ExternalProject_Add(catch2-headers
-    URL https://github.com/catchorg/Catch2/releases/download/v2.11.1/catch.hpp
-    DOWNLOAD_NO_EXTRACT 1
-    CONFIGURE_COMMAND ""
-    BUILD_COMMAND ${CMAKE_COMMAND} -E copy <DOWNLOAD_DIR>/catch.hpp ${TEST_TPIP_INCLUDE}
-    INSTALL_COMMAND ""
-    )
\ No newline at end of file
diff --git a/samples/ObjectDetection/cmake/find_opencv.cmake b/samples/ObjectDetection/cmake/find_opencv.cmake
deleted file mode 100644
index 92086e1..0000000
--- a/samples/ObjectDetection/cmake/find_opencv.cmake
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-# SPDX-License-Identifier: MIT
-
-set(OPENCV_VERSION 4.0.0)
-set(FFMPEG_VERSION 4.2.1)
-set(LIBX264_VERSION stable)
-
-set(OPENCV_LIB OpenCV${OPENCV_VERSION})
-set(FFMPEG_LIB ffmpeg${FFMPEG_VERSION})
-set(X264_LIB   x264${LIBX264_VERSION})
-
-set(OPENCV_NAMES
-    libopencv_core.so.${OPENCV_VERSION}
-    libopencv_imgproc.so.${OPENCV_VERSION}
-    libopencv_imgcodecs.so.${OPENCV_VERSION}
-    libopencv_videoio.so.${OPENCV_VERSION}
-    libopencv_video.so.${OPENCV_VERSION}
-    libopencv_highgui.so.${OPENCV_VERSION})
-
-set(OPENCV_LIBS)
-set(FFMPEG_LIBS)
-
-foreach(opencv_lib ${OPENCV_NAMES})
-    find_library(OPENCV_${opencv_lib}
-        NAMES
-            ${opencv_lib}
-        HINTS
-            ${OPENCV_LIB_DIR}
-        PATHS
-            ${OPENCV_LIB_DIR}
-        PATH_SUFFIXES
-            "lib"
-            "lib64")
-    if(OPENCV_${opencv_lib})
-        message("Found library ${OPENCV_${opencv_lib}}")
-        list(APPEND OPENCV_LIBS ${OPENCV_${opencv_lib}})
-        get_filename_component(OPENCV_LIB_DIR ${OPENCV_${opencv_lib}} DIRECTORY)
-        get_filename_component(OPENCV_ROOT_DIR ${OPENCV_LIB_DIR} DIRECTORY)
-        set(OPENCV_INCLUDE_DIR ${OPENCV_ROOT_DIR}/include/opencv4)
-    endif()
-endforeach()
-
-if(OPENCV_LIBS)
-    message("OpenCV libraries found")
-    set(OPENCV_LIBS_FOUND TRUE)
-else()
-    set(OPENCV_ROOT_DIR ${DEPENDENCIES_DIR}/opencv)
-    set(OPENCV_DEPENDENCIES_ARGS)
-    set(OPENCV_EXTRA_LINKER_ARGS)
-    set(OPENCV_PKGCONFIG)
-
-    if(CMAKE_CROSSCOMPILING)
-        set(FFMPEG_ROOT_DIR ${DEPENDENCIES_DIR}/ffmpeg)
-        set(LIBX264_ROOT_DIR ${DEPENDENCIES_DIR}/x264)
-
-        if (CMAKE_BUILD_TYPE STREQUAL Debug)
-            set(CONFIGURE_DEBUG --enable-debug)
-            set(OPENCV_DEBUG "-DBUILD_WITH_DEBUG_INFO=ON")
-        endif()
-
-
-        ExternalProject_Add(${X264_LIB}
-            URL "https://code.videolan.org/videolan/x264/-/archive/${LIBX264_VERSION}/x264-${LIBX264_VERSION}.tar.gz"
-            DOWNLOAD_DIR ${LIBX264_ROOT_DIR}
-            PREFIX ${LIBX264_ROOT_DIR}
-            CONFIGURE_COMMAND <SOURCE_DIR>/configure
-            --host=${GNU_MACHINE}
-            --enable-static
-            --enable-shared
-            --cross-prefix=${CROSS_PREFIX}
-            --prefix=${CMAKE_BINARY_DIR}
-            --extra-ldflags=-static-libstdc++
-            --extra-cflags=-fPIC
-            ${CONFIGURE_DEBUG}
-            INSTALL_DIR ${CMAKE_BINARY_DIR}
-            BUILD_COMMAND $(MAKE)
-            INSTALL_COMMAND $(MAKE) install
-            )
-
-        set(FFMPEG_Config
-            --enable-shared
-            --enable-cross-compile
-            --cross-prefix=${CROSS_PREFIX}
-            --arch=${CMAKE_SYSTEM_PROCESSOR}
-            --target-os=linux
-            --prefix=${CMAKE_BINARY_DIR}
-            --enable-gpl
-            --enable-nonfree
-            --enable-libx264
-            --extra-cflags=-I${CMAKE_BINARY_DIR}/include
-            --extra-cflags=-fPIC
-            --extra-ldflags=-L${CMAKE_BINARY_DIR}/lib
-            --extra-libs=-ldl
-            --extra-libs=-static-libstdc++
-        )
-
-        ExternalProject_Add(${FFMPEG_LIB}
-            URL "https://github.com/FFmpeg/FFmpeg/archive/n${FFMPEG_VERSION}.tar.gz"
-            URL_HASH MD5=05792c611d1e3ebdf2c7003ff4467390
-            DOWNLOAD_DIR ${FFMPEG_ROOT_DIR}
-            PREFIX ${FFMPEG_ROOT_DIR}
-            CONFIGURE_COMMAND <SOURCE_DIR>/configure ${FFMPEG_Config} ${CONFIGURE_DEBUG}
-            INSTALL_DIR ${CMAKE_BINARY_DIR}
-            BUILD_COMMAND $(MAKE) VERBOSE=1
-            INSTALL_COMMAND $(MAKE) install
-        )
-
-        set(OPENCV_DEPENDENCIES_ARGS "-static-libstdc++ -Wl,-rpath,${CMAKE_BINARY_DIR}/lib")
-        set(OPENCV_EXTRA_LINKER_ARGS "-DOPENCV_EXTRA_EXE_LINKER_FLAGS=${OPENCV_DEPENDENCIES_ARGS}")
-
-        set(OPENCV_PKGCONFIG "PKG_CONFIG_LIBDIR=${CMAKE_BINARY_DIR}/lib/pkgconfig")
-
-        set(FFMPEG_NAMES
-            libavcodec.so
-            libavformat.so
-            libavutil.so
-            libswscale.so
-            )
-
-        foreach(ffmpeg_lib ${FFMPEG_NAMES})
-            add_library(FFMPEG_${ffmpeg_lib} SHARED IMPORTED)
-            set_target_properties(FFMPEG_${ffmpeg_lib} PROPERTIES IMPORTED_LOCATION ${CMAKE_BINARY_DIR}/lib/${ffmpeg_lib})
-            list(APPEND OPENCV_LIBS FFMPEG_${ffmpeg_lib})
-        endforeach()
-
-        add_library(X264_lib264.so SHARED IMPORTED)
-        set_target_properties(X264_lib264.so PROPERTIES IMPORTED_LOCATION ${CMAKE_BINARY_DIR}/lib/libx264.so)
-        list(APPEND OPENCV_LIBS X264_lib264.so)
-    endif()
-
-    set(OPENCV_CMAKE_ARGS
-        -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-        -DCMAKE_C_FLAGS=-fPIC
-        -DCMAKE_CXX_FLAGS=-fPIC
-        -DWITH_GTK=OFF
-        -DWITH_JPEG=ON
-        -DWITH_IPP=OFF
-        -DBUILD_opencv_java_bindings_generator=OFF
-        -DBUILD_opencv_ml=OFF
-        -DBUILD_opencv_objdetect=OFF
-        -DBUILD_opencv_photo=OFF
-        -DBUILD_opencv_python_bindings_generator=OFF
-        -DBUILD_opencv_stitching=OFF
-        -DBUILD_opencv_gapi=OFF
-        -DBUILD_opencv_features2d=OFF
-        -DBUILD_opencv_dnn=OFF
-        -DBUILD_opencv_flann=OFF
-        -DBUILD_opencv_calib3d=OFF
-        -DBUILD_opencv_python2=OFF
-        -DBUILD_opencv_python3=OFF
-        -DBUILD_opencv_java=OFF
-        -DBUILD_opencv_js=OFF
-        -DBUILD_opencv_ts=OFF
-        -DBUILD_JPEG=ON
-        -DBUILD_JPEG_TURBO_DISABLE=ON
-        -DBUILD_PNG=ON
-        -DBUILD_TIFF=ON
-        -DZLIB_FOUND=OFF
-        -DBUILD_ZLIB=ON
-        -DBUILD_PERF_TESTS=OFF
-        -DBUILD_TESTS=OFF
-        -DBUILD_DOCS=OFF
-        -DBUILD_opencv_apps=OFF
-        -DBUILD_EXAMPLES=OFF
-        -DWITH_V4L=ON
-        -DWITH_LIBV4L=OFF
-        -DWITH_FFMPEG=ON
-        -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR}
-        -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-        -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-        -DCMAKE_INSTALL_RPATH=\$ORIGIN:\$ORIGIN/lib:\$ORIGIN/../lib
-        -DCMAKE_SHARED_LINKER_FLAGS=-static-libstdc++
-        ${OPENCV_DEBUG}
-        )
-
-    ExternalProject_Add(${OPENCV_LIB}
-        URL "https://codeload.github.com/opencv/opencv/tar.gz/${OPENCV_VERSION}"
-        URL_HASH MD5=f051c1ff7b327b60123d71b53801b316
-        DOWNLOAD_DIR ${OPENCV_ROOT_DIR}
-        PREFIX ${OPENCV_ROOT_DIR}
-        CONFIGURE_COMMAND ${OPENCV_PKGCONFIG}
-        ${CMAKE_COMMAND} ${OPENCV_CMAKE_ARGS} ${OPENCV_EXTRA_ARGS}
-        ${OPENCV_EXTRA_LINKER_ARGS} ${OPENCV_ROOT_DIR}/src/${OPENCV_LIB}
-        INSTALL_DIR ${CMAKE_BINARY_DIR}
-        BUILD_COMMAND $(MAKE)
-        INSTALL_COMMAND $(MAKE) install
-        )
-
-    if(CMAKE_CROSSCOMPILING)
-        ExternalProject_Add_StepDependencies(${FFMPEG_LIB} build ${X264_LIB})
-        ExternalProject_Add_StepDependencies(${OPENCV_LIB} build ${FFMPEG_LIB})
-    endif()
-
-    set(OPENCV_INCLUDE_DIR ${CMAKE_BINARY_DIR}/include/opencv4)
-    set(OPENCV_LIB_DIR ${CMAKE_BINARY_DIR}/lib)
-
-    foreach(opencv_lib ${OPENCV_NAMES})
-        add_library(OPENCV_${opencv_lib} SHARED IMPORTED)
-        set_target_properties(OPENCV_${opencv_lib} PROPERTIES IMPORTED_LOCATION ${OPENCV_LIB_DIR}/${opencv_lib})
-        list(APPEND OPENCV_LIBS OPENCV_${opencv_lib})
-    endforeach()
-
-endif()
\ No newline at end of file
diff --git a/samples/ObjectDetection/cmake/unit_tests.cmake b/samples/ObjectDetection/cmake/unit_tests.cmake
index dcfa512..1a8c466 100644
--- a/samples/ObjectDetection/cmake/unit_tests.cmake
+++ b/samples/ObjectDetection/cmake/unit_tests.cmake
@@ -7,7 +7,7 @@
 
 file(GLOB TEST_SOURCES "test/*")
 
-include(cmake/find_catch.cmake)
+include(../common/cmake/find_catch.cmake)
 
 file(DOWNLOAD "https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip"
         ${CMAKE_CURRENT_SOURCE_DIR}/test/resources/models.zip SHOW_PROGRESS)
@@ -43,7 +43,7 @@
         INSTALL_COMMAND ""
         )
 
-add_executable("${TEST_TARGET_NAME}" ${SOURCES} ${TEST_SOURCES})
+add_executable("${TEST_TARGET_NAME}" ${SOURCES} ${TEST_SOURCES} ${COMMON_SOURCES})
 
 add_dependencies(
     "${TEST_TARGET_NAME}"
@@ -60,6 +60,6 @@
 
 target_include_directories("${TEST_TARGET_NAME}" PUBLIC ${TEST_TPIP_INCLUDE}
     ${ARMNN_INCLUDE_DIR}
-    ${OPENCV_INCLUDE_DIR} ${DEPENDENCIES_DIR} ${TEST_RESOURCES_DIR})
+    ${OPENCV_INCLUDE_DIR} ${DEPENDENCIES_DIR} ${TEST_RESOURCES_DIR} ${COMMON_INCLUDE_DIR})
 
 target_link_libraries("${TEST_TARGET_NAME}" PUBLIC ${ARMNN_LIBS} ${OPENCV_LIBS} ${FFMPEG_LIBS})
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/ArmnnNetworkExecutor.hpp b/samples/ObjectDetection/include/ArmnnNetworkExecutor.hpp
deleted file mode 100644
index c75b68b..0000000
--- a/samples/ObjectDetection/include/ArmnnNetworkExecutor.hpp
+++ /dev/null
@@ -1,80 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "Types.hpp"
-
-#include "armnn/ArmNN.hpp"
-#include "armnnTfLiteParser/ITfLiteParser.hpp"
-#include "armnnUtils/DataLayoutIndexed.hpp"
-#include <armnn/Logging.hpp>
-
-#include <string>
-#include <vector>
-
-namespace od
-{
-/**
-* @brief Used to load in a network through ArmNN and run inference on it against a given backend.
-*
-*/
-class ArmnnNetworkExecutor
-{
-private:
-    armnn::IRuntimePtr m_Runtime;
-    armnn::NetworkId m_NetId{};
-    mutable InferenceResults m_OutputBuffer;
-    armnn::InputTensors     m_InputTensors;
-    armnn::OutputTensors    m_OutputTensors;
-    std::vector<armnnTfLiteParser::BindingPointInfo> m_outputBindingInfo;
-
-    std::vector<std::string> m_outputLayerNamesList;
-
-    armnnTfLiteParser::BindingPointInfo m_inputBindingInfo;
-
-    void PrepareTensors(const void* inputData, const size_t dataBytes);
-
-    template <typename Enumeration>
-    auto log_as_int(Enumeration value)
-    -> typename std::underlying_type<Enumeration>::type
-    {
-        return static_cast<typename std::underlying_type<Enumeration>::type>(value);
-    }
-
-public:
-    ArmnnNetworkExecutor() = delete;
-
-    /**
-    * @brief Initializes the network with the given input data. Parsed through TfLiteParser and optimized for a
-    *        given backend.
-    *
-    * Note that the output layers names order in m_outputLayerNamesList affects the order of the feature vectors
-    * in output of the Run method.
-    *
-    *       * @param[in] modelPath - Relative path to the model file
-    *       * @param[in] backends - The list of preferred backends to run inference on
-    */
-    ArmnnNetworkExecutor(std::string& modelPath,
-                         std::vector<armnn::BackendId>& backends);
-
-    /**
-    * @brief Returns the aspect ratio of the associated model in the order of width, height.
-    */
-    Size GetImageAspectRatio();
-
-    armnn::DataType GetInputDataType() const;
-
-    /**
-    * @brief Runs inference on the provided input data, and stores the results in the provided InferenceResults object.
-    *
-    * @param[in] inputData - input frame data
-    * @param[in] dataBytes - input data size in bytes
-    * @param[out] results - Vector of DetectionResult objects used to store the output result.
-    */
-    bool Run(const void* inputData, const size_t dataBytes, InferenceResults& outResults);
-
-};
-}// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/CmdArgsParser.hpp b/samples/ObjectDetection/include/CmdArgsParser.hpp
deleted file mode 100644
index 6c22e6f..0000000
--- a/samples/ObjectDetection/include/CmdArgsParser.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-#include <string>
-#include <map>
-#include <iostream>
-
-const std::string MODEL_NAME = "--model-name";
-const std::string VIDEO_FILE_PATH = "--video-file-path";
-const std::string MODEL_FILE_PATH = "--model-file-path";
-const std::string OUTPUT_VIDEO_FILE_PATH = "--output-video-file-path";
-const std::string LABEL_PATH = "--label-path";
-const std::string PREFERRED_BACKENDS = "--preferred-backends";
-const std::string HELP = "--help";
-
-/*
- * The accepted options for this Object detection executable
- */
-static std::map<std::string, std::string> CMD_OPTIONS = {
-        {VIDEO_FILE_PATH, "[REQUIRED] Path to the video file to run object detection on"},
-        {MODEL_FILE_PATH, "[REQUIRED] Path to the Object Detection model to use"},
-        {LABEL_PATH, "[REQUIRED] Path to the label set for the provided model file. "
-                     "Label file is should just be an ordered list, seperated by new line."},
-        {MODEL_NAME, "[REQUIRED] The name of the model being used. Accepted options: YOLO_V3_TINY, SSD_MOBILE"},
-        {OUTPUT_VIDEO_FILE_PATH, "[OPTIONAL] Path to the output video file with detections added in. "
-                                 "If specified will save file to disk, else displays the output to screen"},
-        {PREFERRED_BACKENDS, "[OPTIONAL] Takes the preferred backends in preference order, separated by comma."
-                             " For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]."
-                             " Defaults to CpuAcc,CpuRef"}
-};
-
-/*
- * Checks that a particular option was specified by the user
- */
-bool CheckOptionSpecified(const std::map<std::string, std::string>& options, const std::string& option);
-
-
-/*
- * Retrieves the user provided option
- */
-std::string GetSpecifiedOption(const std::map<std::string, std::string>& options, const std::string& option);
-
-
-/*
- * Parses all the command line options provided by the user and stores in a map.
- */
-int ParseOptions(std::map<std::string, std::string>& options, std::map<std::string, std::string>& acceptedOptions,
-                 char *argv[], int argc);
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/CvVideoFileWriter.hpp b/samples/ObjectDetection/include/CvVideoFileWriter.hpp
deleted file mode 100644
index ea1501b..0000000
--- a/samples/ObjectDetection/include/CvVideoFileWriter.hpp
+++ /dev/null
@@ -1,61 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "IFrameOutput.hpp"
-#include <opencv2/opencv.hpp>
-
-namespace od
-{
-
-class CvVideoFileWriter : public IFrameOutput<cv::Mat> {
-public:
-    /**
-     * @brief Default constructor.
-     *
-     * Underlying open cv video writer object will be instantiated.
-     */
-    CvVideoFileWriter() = default;
-
-    ~CvVideoFileWriter() override = default;
-
-    /**
-     * @brief Initialises video file writer.
-     *
-     * Opens opencv writer with given params. FFMPEG backend is used.
-     *
-     * @param outputVideo path to the video file.
-     * @param encoding cv::CAP_PROP_FOURCC code.
-     * @param fps target frame rate.
-     * @param width target frame width.
-     * @param height target frame height.
-     *
-     */
-    void Init(const std::string& outputVideo, int encoding, double fps, int width, int height);
-
-    /**
-     * Writes frame to the file using opencv writer.
-     *
-     * @param frame data to write.
-     */
-    void WriteFrame(std::shared_ptr<cv::Mat>& frame) override;
-
-    /**
-     * Releases opencv writer.
-     */
-    void Close() override;
-
-    /**
-     * Checks if opencv writer was successfully opened.
-     * @return true is underlying writer is ready to be used, false otherwise.
-     */
-    bool IsReady() const override;
-
-private:
-    cv::VideoWriter m_cvWriter{};
-    bool m_ready = false;
-};
-}// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/CvVideoFrameReader.hpp b/samples/ObjectDetection/include/CvVideoFrameReader.hpp
deleted file mode 100644
index 081f926..0000000
--- a/samples/ObjectDetection/include/CvVideoFrameReader.hpp
+++ /dev/null
@@ -1,108 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-
-#include "IFrameReader.hpp"
-#include <opencv2/opencv.hpp>
-
-namespace od
-{
-
-class CvVideoFrameReader :
-    public IFrameReader<cv::Mat>
-{
-public:
-    /**
-     * @brief Default constructor.
-     *
-     * Underlying open cv video capture object will be instantiated.
-     */
-    CvVideoFrameReader() = default;
-
-    ~CvVideoFrameReader() override = default;
-
-    /**
-     *@brief Initialises reader to capture frames from video file.
-     *
-     * @param source path to the video file or image sequence.
-     *
-     * @throws std::runtime_error if init failed
-     */
-    void Init(const std::string& source);
-
-    std::shared_ptr <cv::Mat> ReadFrame() override;
-
-    bool IsExhausted(const std::shared_ptr <cv::Mat>& frame) const override;
-
-    /**
-     * Returns effective video frame width supported by the source/set by the user.
-     * Must be called after Init method.
-     * @return frame width
-     */
-    int GetSourceWidth() const;
-
-    /**
-     * Returns effective video frame height supported by the source/set by the user.
-     * Must be called after Init method.
-     * @return frame height
-     */
-    int GetSourceHeight() const;
-
-    /**
-     * Returns effective fps value supported by the source/set by the user.
-     * @return fps value
-     */
-    double GetSourceFps() const;
-
-    /**
-     * Will query OpenCV to convert images to RGB
-     * Copy is actually default behaviour, but the set function needs to be called
-     * in order to know whether OpenCV supports conversion from our source format.
-     * @return boolean,
-     *     true:  OpenCV returns RGB
-     *     false: OpenCV returns the fourcc format from GetSourceEncoding
-     */
-    bool ConvertToRGB();
-
-    /**
-     * Returns 4-character code of codec.
-     * @return codec name
-     */
-    std::string GetSourceEncoding() const;
-
-   /**
-    * Get the fourcc int from its string name.
-    * @return codec int
-    */
-    int GetSourceEncodingInt() const;
-
-    int GetFrameCount() const;
-
-private:
-    cv::VideoCapture m_capture;
-
-    void CheckIsOpen(const std::string& source);
-};
-
-class CvVideoFrameReaderRgbWrapper :
-        public IFrameReader<cv::Mat>
-{
-public:
-    CvVideoFrameReaderRgbWrapper() = delete;
-    CvVideoFrameReaderRgbWrapper(const CvVideoFrameReaderRgbWrapper& o) = delete;
-    CvVideoFrameReaderRgbWrapper(CvVideoFrameReaderRgbWrapper&& o) = delete;
-
-    CvVideoFrameReaderRgbWrapper(std::unique_ptr<od::CvVideoFrameReader> reader);
-
-    std::shared_ptr<cv::Mat> ReadFrame() override;
-
-    bool IsExhausted(const std::shared_ptr<cv::Mat>& frame) const override;
-
-private:
-    std::unique_ptr<od::CvVideoFrameReader> m_reader;
-};
-
-}// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/CvWindowOutput.hpp b/samples/ObjectDetection/include/CvWindowOutput.hpp
deleted file mode 100644
index 317327b..0000000
--- a/samples/ObjectDetection/include/CvWindowOutput.hpp
+++ /dev/null
@@ -1,53 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "IFrameOutput.hpp"
-#include <opencv2/opencv.hpp>
-
-namespace od
-{
-
-class CvWindowOutput : public IFrameOutput<cv::Mat> {
-public:
-
-    CvWindowOutput() = default;
-
-    ~CvWindowOutput() override = default;
-
-    /**
-     * @brief Creates a named window.
-     *
-     * Uses opencv to create a window with given name.
-     *
-     * @param windowName opencv window name.
-     *
-     */
-    void Init(const std::string& windowName);
-
-    /**
-     * Writes frame to the window.
-     *
-     * @param frame data to write.
-     */
-    void WriteFrame(std::shared_ptr<cv::Mat>& frame) override;
-
-    /**
-     * Releases all windows.
-     */
-    void Close() override;
-
-    /**
-     * Always true.
-     * @return true.
-     */
-    bool IsReady() const override;
-
-private:
-    std::string m_windowName;
-
-};
-}// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/IDetectionResultDecoder.hpp b/samples/ObjectDetection/include/IDetectionResultDecoder.hpp
index c0a29df..a8a3cbb 100644
--- a/samples/ObjectDetection/include/IDetectionResultDecoder.hpp
+++ b/samples/ObjectDetection/include/IDetectionResultDecoder.hpp
@@ -30,9 +30,9 @@
     *
     * @return     Vector of decoded detected objects.
     */
-    virtual DetectedObjects Decode(const InferenceResults& results,
-                                   const Size& outputFrameSize,
-                                   const Size& resizedFrameSize,
+    virtual DetectedObjects Decode(const common::InferenceResults<float>& results,
+                                   const common::Size& outputFrameSize,
+                                   const common::Size& resizedFrameSize,
                                    const std::vector<std::string>& labels) = 0;
 
 };
diff --git a/samples/ObjectDetection/include/IFrameOutput.hpp b/samples/ObjectDetection/include/IFrameOutput.hpp
deleted file mode 100644
index c8b4fe5..0000000
--- a/samples/ObjectDetection/include/IFrameOutput.hpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <cstddef>
-#include <memory>
-
-namespace od
-{
-/**
- * @brief Frames output interface
- *
- * @tparam FrameDataT frame container data type
- */
-    template<typename FrameDataT> class IFrameOutput
-    {
-
-    public:
-        /**
-         * @brief Writes frame to the selected output
-         *
-         * @param frame container
-         */
-        virtual void WriteFrame(std::shared_ptr <FrameDataT>& frame) = 0;
-
-        /**
-         * @brief Closes the frame output
-         */
-        virtual void Close() = 0;
-
-        /**
-         * @brief Checks if the frame sink is ready to write.
-         *
-         * @return True if frame sink is ready, False otherwise
-         */
-        virtual bool IsReady() const = 0;
-
-        /**
-         * @brief Default destructor
-         */
-        virtual ~IFrameOutput() = default;
-
-    };
-
-}// namespace od
diff --git a/samples/ObjectDetection/include/IFrameReader.hpp b/samples/ObjectDetection/include/IFrameReader.hpp
deleted file mode 100644
index d371b7d..0000000
--- a/samples/ObjectDetection/include/IFrameReader.hpp
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <cstddef>
-#include <memory>
-
-namespace od
-{
-/**
- * @brief Frame source reader interface
- *
- * @tparam FrameDataT frame container data type
- */
-template<typename FrameDataT> class IFrameReader
-{
-
-public:
-    /**
-     * @brief Reads the next frame from the source
-     *
-     * @return pointer to the frame container
-     */
-    virtual std::shared_ptr <FrameDataT> ReadFrame() = 0;
-
-    /**
-     * @brief Checks if the frame source has more frames to read.
-     *
-     * @param[in] frame the pointer to the last frame captured with the ReadFrame method could be used in
-     *                  implementation specific logic to check frames source state.
-     * @return True if frame source was exhausted, False otherwise
-     */
-    virtual bool IsExhausted(const std::shared_ptr <FrameDataT>& frame) const = 0;
-
-    /**
-     * @brief Default destructor
-     */
-    virtual ~IFrameReader() = default;
-
-};
-
-}// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/ImageUtils.hpp b/samples/ObjectDetection/include/ImageUtils.hpp
index 07e2b83..9bae568 100644
--- a/samples/ObjectDetection/include/ImageUtils.hpp
+++ b/samples/ObjectDetection/include/ImageUtils.hpp
@@ -21,7 +21,7 @@
 */
 void AddInferenceOutputToFrame(od::DetectedObjects& decodedResults,
                                cv::Mat& inputFrame,
-                               std::vector<std::tuple<std::string, od::BBoxColor>>& labels);
+                               std::vector<std::tuple<std::string, common::BBoxColor>>& labels);
 
 /**
 * @brief Function to resize a frame while keeping aspect ratio.
@@ -30,7 +30,7 @@
 * @param[out]  dest            the frame we want to resize into.
 * @param[in]  aspectRatio      aspect ratio to use when resizing.
 */
-void ResizeFrame(const cv::Mat& frame, cv::Mat& dest, const od::Size& aspectRatio);
+void ResizeFrame(const cv::Mat& frame, cv::Mat& dest, const common::Size& aspectRatio);
 
 /**
 * @brief Function to pad a frame.
@@ -49,7 +49,7 @@
  * @param cache operation requires intermediate data container.
  * @param destSize size of the destination frame
  */
-void ResizeWithPad(const cv::Mat& frame, cv::Mat& dest, cv::Mat& cache, const od::Size& destSize);
+void ResizeWithPad(const cv::Mat& frame, cv::Mat& dest, cv::Mat& cache, const common::Size& destSize);
 
 /**
 * @brief Function to retrieve the cv::scalar color from a RGB tuple.
diff --git a/samples/ObjectDetection/include/NetworkPipeline.hpp b/samples/ObjectDetection/include/ObjectDetectionPipeline.hpp
similarity index 88%
rename from samples/ObjectDetection/include/NetworkPipeline.hpp
rename to samples/ObjectDetection/include/ObjectDetectionPipeline.hpp
index c3408b4..38de65b 100644
--- a/samples/ObjectDetection/include/NetworkPipeline.hpp
+++ b/samples/ObjectDetection/include/ObjectDetectionPipeline.hpp
@@ -27,7 +27,7 @@
      * @param executor - unique pointer to inference runner
      * @param decoder - unique pointer to inference results decoder
      */
-    ObjDetectionPipeline(std::unique_ptr<ArmnnNetworkExecutor> executor,
+    ObjDetectionPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
                          std::unique_ptr<IDetectionResultDecoder> decoder);
 
     /**
@@ -48,7 +48,7 @@
      * @param[in] processed - input inference data. Data type should be aligned with input tensor.
      * @param[out] result - raw floating point inference results.
      */
-    virtual void Inference(const cv::Mat& processed, InferenceResults& result);
+    virtual void Inference(const cv::Mat& processed, common::InferenceResults<float>& result);
 
     /**
      * @brief Standard inference results post-processing implementation.
@@ -58,13 +58,13 @@
      * @param[in] inferenceResult - inference results to be decoded.
      * @param[in] callback - a function to be called after successful inference results decoding.
      */
-    virtual void PostProcessing(InferenceResults& inferenceResult,
+    virtual void PostProcessing(common::InferenceResults<float>& inferenceResult,
                                 const std::function<void (DetectedObjects)>& callback);
 
 protected:
-    std::unique_ptr<ArmnnNetworkExecutor> m_executor;
+    std::unique_ptr<common::ArmnnNetworkExecutor<float>> m_executor;
     std::unique_ptr<IDetectionResultDecoder> m_decoder;
-    Size m_inputImageSize{};
+    common::Size m_inputImageSize{};
     cv::Mat m_processedFrame;
 };
 
@@ -85,7 +85,7 @@
      * @param ClsThreshold[in] -  class probability threshold for decoding step
      * @param ObjectThreshold[in] - detected object score threshold for decoding step
      */
-    YoloV3Tiny(std::unique_ptr<ArmnnNetworkExecutor> executor,
+    YoloV3Tiny(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
                float NMSThreshold, float ClsThreshold, float ObjectThreshold);
 
     /**
@@ -116,7 +116,7 @@
      * @param[in] - unique pointer to inference runner
      * @paramp[in] objectThreshold - detected object score threshold for decoding step
      */
-    MobileNetSSDv1(std::unique_ptr<ArmnnNetworkExecutor> executor,
+    MobileNetSSDv1(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
                    float objectThreshold);
 
     /**
@@ -143,6 +143,6 @@
  *
  * @return unique pointer to object detection pipeline.
  */
-IPipelinePtr CreatePipeline(od::ODPipelineOptions& config);
+IPipelinePtr CreatePipeline(common::PipelineOptions& config);
 
 }// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/SSDResultDecoder.hpp b/samples/ObjectDetection/include/SSDResultDecoder.hpp
index 65afb8d..4c703c1 100644
--- a/samples/ObjectDetection/include/SSDResultDecoder.hpp
+++ b/samples/ObjectDetection/include/SSDResultDecoder.hpp
@@ -21,9 +21,9 @@
      */
     SSDResultDecoder(float ObjectThreshold);
 
-    DetectedObjects Decode(const InferenceResults& results,
-                           const Size& outputFrameSize,
-                           const Size& resizedFrameSize,
+    DetectedObjects Decode(const common::InferenceResults<float>& results,
+                           const common::Size& outputFrameSize,
+                           const common::Size& resizedFrameSize,
                            const std::vector<std::string>& labels) override;
 
 private:
diff --git a/samples/ObjectDetection/include/Types.hpp b/samples/ObjectDetection/include/Types.hpp
deleted file mode 100644
index 801cff3..0000000
--- a/samples/ObjectDetection/include/Types.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <cstddef>
-#include <cstdint>
-#include <vector>
-#include <tuple>
-#include <armnn/BackendId.hpp>
-
-namespace od
-{
-
-struct Size
-{
-
-    uint32_t m_Width;
-    uint32_t m_Height;
-
-    Size() : Size(0, 0) {}
-
-    Size(uint32_t width, uint32_t height) :
-            m_Width{width}, m_Height{height} {}
-
-    Size(const Size& other)
-            : Size(other.m_Width, other.m_Height) {}
-
-    ~Size() = default;
-
-    Size &operator=(const Size& other) = default;
-};
-
-struct BBoxColor
-{
-    std::tuple<int, int, int> colorCode;
-};
-
-struct ODPipelineOptions
-{
-    std::string m_ModelName;
-    std::string m_ModelFilePath;
-    std::vector<armnn::BackendId> m_backends;
-};
-
-using InferenceResult = std::vector<float>;
-using InferenceResults = std::vector<InferenceResult>;
-}
\ No newline at end of file
diff --git a/samples/ObjectDetection/include/YoloResultDecoder.hpp b/samples/ObjectDetection/include/YoloResultDecoder.hpp
index 98435e3..ae6cb5e 100644
--- a/samples/ObjectDetection/include/YoloResultDecoder.hpp
+++ b/samples/ObjectDetection/include/YoloResultDecoder.hpp
@@ -26,9 +26,9 @@
      */
     YoloResultDecoder(float NMSThreshold, float ClsThreshold, float ObjectThreshold);
 
-    DetectedObjects Decode(const InferenceResults& results,
-                           const Size& outputFrameSize,
-                           const Size& resizedFrameSize,
+    DetectedObjects Decode(const common::InferenceResults<float>& results,
+                           const common::Size& outputFrameSize,
+                           const common::Size& resizedFrameSize,
                            const std::vector <std::string>& labels) override;
 private:
     float m_NmsThreshold;
diff --git a/samples/ObjectDetection/src/ArmnnNetworkExecutor.cpp b/samples/ObjectDetection/src/ArmnnNetworkExecutor.cpp
deleted file mode 100644
index cb4c0c9..0000000
--- a/samples/ObjectDetection/src/ArmnnNetworkExecutor.cpp
+++ /dev/null
@@ -1,140 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ArmnnNetworkExecutor.hpp"
-#include "Types.hpp"
-
-#include <random>
-#include <string>
-
-namespace od
-{
-
-armnn::DataType ArmnnNetworkExecutor::GetInputDataType() const
-{
-    return m_inputBindingInfo.second.GetDataType();
-}
-
-ArmnnNetworkExecutor::ArmnnNetworkExecutor(std::string& modelPath,
-                                           std::vector<armnn::BackendId>& preferredBackends)
-: m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions()))
-{
-    // Import the TensorFlow lite model.
-    armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
-    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
-
-    std::vector<std::string> inputNames = parser->GetSubgraphInputTensorNames(0);
-
-    m_inputBindingInfo = parser->GetNetworkInputBindingInfo(0, inputNames[0]);
-
-    m_outputLayerNamesList = parser->GetSubgraphOutputTensorNames(0);
-
-    std::vector<armnn::BindingPointInfo> outputBindings;
-    for(const std::string& name : m_outputLayerNamesList)
-    {
-        m_outputBindingInfo.push_back(std::move(parser->GetNetworkOutputBindingInfo(0, name)));
-    }
-
-    std::vector<std::string> errorMessages;
-    // optimize the network.
-    armnn::IOptimizedNetworkPtr optNet = Optimize(*network,
-                                                  preferredBackends,
-                                                  m_Runtime->GetDeviceSpec(),
-                                                  armnn::OptimizerOptions(),
-                                                  armnn::Optional<std::vector<std::string>&>(errorMessages));
-
-    if (!optNet)
-    {
-        const std::string errorMessage{"ArmnnNetworkExecutor: Failed to optimize network"};
-        ARMNN_LOG(error) << errorMessage;
-        throw armnn::Exception(errorMessage);
-    }
-
-    // Load the optimized network onto the m_Runtime device
-    std::string errorMessage;
-    if (armnn::Status::Success != m_Runtime->LoadNetwork(m_NetId, std::move(optNet), errorMessage))
-    {
-        ARMNN_LOG(error) << errorMessage;
-    }
-
-    //pre-allocate memory for output (the size of it never changes)
-    for (int it = 0; it < m_outputLayerNamesList.size(); ++it)
-    {
-        const armnn::DataType dataType = m_outputBindingInfo[it].second.GetDataType();
-        const armnn::TensorShape& tensorShape = m_outputBindingInfo[it].second.GetShape();
-
-        InferenceResult oneLayerOutResult;
-        switch (dataType)
-        {
-            case armnn::DataType::Float32:
-            {
-                oneLayerOutResult.resize(tensorShape.GetNumElements(), 0);
-                break;
-            }
-            default:
-            {
-                errorMessage = "ArmnnNetworkExecutor: unsupported output tensor data type";
-                ARMNN_LOG(error) << errorMessage << " " << log_as_int(dataType);
-                throw armnn::Exception(errorMessage);
-            }
-        }
-
-        m_OutputBuffer.emplace_back(oneLayerOutResult);
-
-        // Make ArmNN output tensors
-        m_OutputTensors.reserve(m_OutputBuffer.size());
-        for (size_t it = 0; it < m_OutputBuffer.size(); ++it)
-        {
-            m_OutputTensors.emplace_back(std::make_pair(
-                    m_outputBindingInfo[it].first,
-                    armnn::Tensor(m_outputBindingInfo[it].second,
-                                  m_OutputBuffer.at(it).data())
-            ));
-        }
-    }
-
-}
-
-void ArmnnNetworkExecutor::PrepareTensors(const void* inputData, const size_t dataBytes)
-{
-    assert(m_inputBindingInfo.second.GetNumBytes() >= dataBytes);
-    m_InputTensors.clear();
-    m_InputTensors = {{ m_inputBindingInfo.first, armnn::ConstTensor(m_inputBindingInfo.second, inputData)}};
-}
-
-bool ArmnnNetworkExecutor::Run(const void* inputData, const size_t dataBytes, InferenceResults& outResults)
-{
-    /* Prepare tensors if they are not ready */
-    ARMNN_LOG(debug) << "Preparing tensors...";
-    this->PrepareTensors(inputData, dataBytes);
-    ARMNN_LOG(trace) << "Running inference...";
-
-    armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetId, m_InputTensors, m_OutputTensors);
-
-    std::stringstream inferenceFinished;
-    inferenceFinished << "Inference finished with code {" << log_as_int(ret) << "}\n";
-
-    ARMNN_LOG(trace) << inferenceFinished.str();
-
-    if (ret == armnn::Status::Failure)
-    {
-        ARMNN_LOG(error) << "Failed to perform inference.";
-    }
-
-    outResults.reserve(m_outputLayerNamesList.size());
-    outResults = m_OutputBuffer;
-
-    return (armnn::Status::Success == ret);
-}
-
-Size ArmnnNetworkExecutor::GetImageAspectRatio()
-{
-    const auto shape = m_inputBindingInfo.second.GetShape();
-    assert(shape.GetNumDimensions() == 4);
-    armnnUtils::DataLayoutIndexed nhwc(armnn::DataLayout::NHWC);
-    return Size(shape[nhwc.GetWidthIndex()],
-                shape[nhwc.GetHeightIndex()]);
-}
-}// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/src/CmdArgsParser.cpp b/samples/ObjectDetection/src/CmdArgsParser.cpp
deleted file mode 100644
index b8c74bc..0000000
--- a/samples/ObjectDetection/src/CmdArgsParser.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "CmdArgsParser.hpp"
-#include <iostream>
-/*
- * Checks that a particular option was specified by the user
- */
-bool CheckOptionSpecified(const std::map<std::string, std::string>& options, const std::string& option)
-{
-    auto it = options.find(option);
-    return it!=options.end();
-}
-
-/*
- * Retrieves the user provided option
- */
-std::string GetSpecifiedOption(const std::map<std::string, std::string>& options, const std::string& option)
-{
-    if (CheckOptionSpecified(options, option)){
-        return options.at(option);
-    }
-    else
-    {
-        throw std::invalid_argument("Required option: " + option + " not defined.");
-    }
-}
-
-/*
- * Parses all the command line options provided by the user and stores in a map.
- */
-int ParseOptions(std::map<std::string, std::string>& options, std::map<std::string, std::string>& acceptedOptions,
-                 char *argv[], int argc)
-{
-    for (int i = 1; i < argc; ++i)
-    {
-        std::string currentOption = std::string(argv[i]);
-        auto it = acceptedOptions.find(currentOption);
-        if (it != acceptedOptions.end())
-        {
-            if (i + 1 < argc && std::string(argv[i + 1]).rfind("--", 0) != 0)
-            {
-                std::string value = argv[++i];
-                options.insert({it->first, value});
-            }
-            else if (std::string(argv[i]) == HELP)
-            {
-                std::cout << "Available options" << std::endl;
-                for (auto & acceptedOption : acceptedOptions)
-                {
-                    std::cout << acceptedOption.first << " : " << acceptedOption.second << std::endl;
-                }
-                return 2;
-            }
-            else
-            {
-                std::cerr << std::string(argv[i]) << " option requires one argument." << std::endl;
-                return 1;
-            }
-        }
-        else
-        {
-            std::cerr << "Unrecognised option: " << std::string(argv[i]) << std::endl;
-            return 1;
-        }
-    }
-    return 0;
-}
diff --git a/samples/ObjectDetection/src/CvVideoFileWriter.cpp b/samples/ObjectDetection/src/CvVideoFileWriter.cpp
deleted file mode 100644
index ab80b95..0000000
--- a/samples/ObjectDetection/src/CvVideoFileWriter.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "CvVideoFileWriter.hpp"
-
-namespace od
-{
-
-void CvVideoFileWriter::Init(const std::string& outputVideo, int encoding, double fps, int width, int height)
-{
-    m_ready = m_cvWriter.open(outputVideo, cv::CAP_FFMPEG,
-                              encoding,
-                              fps,
-                              cv::Size(width, height), true);
-}
-
-
-void CvVideoFileWriter::WriteFrame(std::shared_ptr<cv::Mat>& frame)
-{
-    if(m_cvWriter.isOpened())
-    {
-        cv::cvtColor(*frame, *frame, cv::COLOR_RGB2BGR);
-        m_cvWriter.write(*frame);
-    }
-}
-
-bool CvVideoFileWriter::IsReady() const
-{
-    return m_ready;
-}
-
-void CvVideoFileWriter::Close()
-{
-    m_cvWriter.release();
-}
-}// namespace od
diff --git a/samples/ObjectDetection/src/CvVideoFrameReader.cpp b/samples/ObjectDetection/src/CvVideoFrameReader.cpp
deleted file mode 100644
index 09b5050..0000000
--- a/samples/ObjectDetection/src/CvVideoFrameReader.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-
-#include "CvVideoFrameReader.hpp"
-
-namespace od
-{
-
-std::shared_ptr<cv::Mat> CvVideoFrameReader::ReadFrame()
-{
-    // opencv copies data anyway
-    cv::Mat captureFrame;
-    m_capture.read(captureFrame);
-    return std::make_shared<cv::Mat>(std::move(captureFrame));
-}
-
-bool CvVideoFrameReader::IsExhausted(const std::shared_ptr<cv::Mat>& frame) const
-{
-    assert(frame!=nullptr);
-    return frame->empty();
-}
-
-void CvVideoFrameReader::CheckIsOpen(const std::string& source)
-{
-    if (!m_capture.isOpened())
-    {
-        throw std::runtime_error("Failed to open video capture for the source = " + source);
-    }
-}
-
-void CvVideoFrameReader::Init(const std::string& source)
-{
-    m_capture.open(source);
-    CheckIsOpen(source);
-}
-
-int CvVideoFrameReader::GetSourceWidth() const
-{
-    return static_cast<int>(lround(m_capture.get(cv::CAP_PROP_FRAME_WIDTH)));
-}
-
-int CvVideoFrameReader::GetSourceHeight() const
-{
-    return static_cast<int>(lround(m_capture.get(cv::CAP_PROP_FRAME_HEIGHT)));
-}
-
-double CvVideoFrameReader::GetSourceFps() const
-{
-    return m_capture.get(cv::CAP_PROP_FPS);
-}
-
-bool CvVideoFrameReader::ConvertToRGB()
-{
-    m_capture.set(cv::CAP_PROP_CONVERT_RGB, 1.0);
-    return static_cast<bool>(m_capture.get(cv::CAP_PROP_CONVERT_RGB));
-}
-
-std::string CvVideoFrameReader::GetSourceEncoding() const
-{
-    char fourccStr[5];
-    auto fourcc = (int)m_capture.get(cv::CAP_PROP_FOURCC);
-    sprintf(fourccStr,"%c%c%c%c",fourcc & 0xFF, (fourcc >> 8) & 0xFF, (fourcc >> 16) & 0xFF, (fourcc >> 24) & 0xFF);
-    return fourccStr;
-}
-
-int CvVideoFrameReader::GetSourceEncodingInt() const
-{
-    return (int)m_capture.get(cv::CAP_PROP_FOURCC);
-}
-
-int CvVideoFrameReader::GetFrameCount() const
-{
-    return static_cast<int>(lround(m_capture.get(cv::CAP_PROP_FRAME_COUNT)));
-};
-
-std::shared_ptr<cv::Mat> CvVideoFrameReaderRgbWrapper::ReadFrame()
-{
-    auto framePtr = m_reader->ReadFrame();
-    if (!IsExhausted(framePtr))
-    {
-        cv::cvtColor(*framePtr, *framePtr, cv::COLOR_BGR2RGB);
-    }
-    return framePtr;
-}
-
-bool CvVideoFrameReaderRgbWrapper::IsExhausted(const std::shared_ptr<cv::Mat>& frame) const
-{
-    return m_reader->IsExhausted(frame);
-}
-
-CvVideoFrameReaderRgbWrapper::CvVideoFrameReaderRgbWrapper(std::unique_ptr<od::CvVideoFrameReader> reader):
-        m_reader(std::move(reader))
-{}
-
-}// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/src/CvWindowOutput.cpp b/samples/ObjectDetection/src/CvWindowOutput.cpp
deleted file mode 100644
index a32147b..0000000
--- a/samples/ObjectDetection/src/CvWindowOutput.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "CvWindowOutput.hpp"
-
-namespace od
-{
-
-void CvWindowOutput::Init(const std::string& windowName)
-{
-    m_windowName = windowName;
-    cv::namedWindow(m_windowName, cv::WINDOW_AUTOSIZE);
-}
-
-void CvWindowOutput::WriteFrame(std::shared_ptr<cv::Mat>& frame)
-{
-    cv::cvtColor(*frame, *frame, cv::COLOR_RGB2BGR);
-    cv::imshow( m_windowName, *frame);
-    cv::waitKey(30);
-}
-
-void CvWindowOutput::Close()
-{
-    cv::destroyWindow(m_windowName);
-}
-
-bool CvWindowOutput::IsReady() const
-{
-    return true;
-}
-}// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/src/ImageUtils.cpp b/samples/ObjectDetection/src/ImageUtils.cpp
index 9a3ed17..05b8a66 100644
--- a/samples/ObjectDetection/src/ImageUtils.cpp
+++ b/samples/ObjectDetection/src/ImageUtils.cpp
@@ -15,7 +15,7 @@
 }
 
 void AddInferenceOutputToFrame(od::DetectedObjects& decodedResults, cv::Mat& inputFrame,
-                               std::vector<std::tuple<std::string, od::BBoxColor>>& labels)
+                               std::vector<std::tuple<std::string, common::BBoxColor>>& labels)
 {
     for(const od::DetectedObject& object : decodedResults)
     {
@@ -86,7 +86,7 @@
 }
 
 
-void ResizeFrame(const cv::Mat& frame, cv::Mat& dest, const od::Size& aspectRatio)
+void ResizeFrame(const cv::Mat& frame, cv::Mat& dest, const common::Size& aspectRatio)
 {
     if(&dest != &frame)
     {
@@ -119,7 +119,7 @@
     }
 }
 
-void ResizeWithPad(const cv::Mat& frame, cv::Mat& dest, cv::Mat& cache, const od::Size& destSize)
+void ResizeWithPad(const cv::Mat& frame, cv::Mat& dest, cv::Mat& cache, const common::Size& destSize)
 {
     ResizeFrame(frame, cache, destSize);
     PadFrame(cache, dest,destSize.m_Height - cache.rows,destSize.m_Width - cache.cols);
diff --git a/samples/ObjectDetection/src/Main.cpp b/samples/ObjectDetection/src/Main.cpp
index 10abb65..e057981 100644
--- a/samples/ObjectDetection/src/Main.cpp
+++ b/samples/ObjectDetection/src/Main.cpp
@@ -6,7 +6,7 @@
 #include "CvVideoFrameReader.hpp"
 #include "CvWindowOutput.hpp"
 #include "CvVideoFileWriter.hpp"
-#include "NetworkPipeline.hpp"
+#include "ObjectDetectionPipeline.hpp"
 #include "CmdArgsParser.hpp"
 
 #include <fstream>
@@ -14,6 +14,30 @@
 #include <map>
 #include <random>
 
+const std::string MODEL_NAME = "--model-name";
+const std::string VIDEO_FILE_PATH = "--video-file-path";
+const std::string MODEL_FILE_PATH = "--model-file-path";
+const std::string OUTPUT_VIDEO_FILE_PATH = "--output-video-file-path";
+const std::string LABEL_PATH = "--label-path";
+const std::string PREFERRED_BACKENDS = "--preferred-backends";
+const std::string HELP = "--help";
+
+/*
+ * The accepted options for this Object detection executable
+ */
+static std::map<std::string, std::string> CMD_OPTIONS = {
+        {VIDEO_FILE_PATH, "[REQUIRED] Path to the video file to run object detection on"},
+        {MODEL_FILE_PATH, "[REQUIRED] Path to the Object Detection model to use"},
+        {LABEL_PATH, "[REQUIRED] Path to the label set for the provided model file. "
+                     "Label file should just be an ordered list, separated by new line."},
+        {MODEL_NAME, "[REQUIRED] The name of the model being used. Accepted options: YOLO_V3_TINY, SSD_MOBILE"},
+        {OUTPUT_VIDEO_FILE_PATH, "[OPTIONAL] Path to the output video file with detections added in. "
+                                 "If specified will save file to disk, else displays the output to screen"},
+        {PREFERRED_BACKENDS, "[OPTIONAL] Takes the preferred backends in preference order, separated by comma."
+                             " For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]."
+                             " Defaults to CpuAcc,CpuRef"}
+};
+
 /*
  * Reads the user supplied backend preference, splits it by comma, and returns an ordered vector
  */
@@ -34,10 +58,10 @@
 /*
  * Assigns a color to each label in the label set
  */
-std::vector<std::tuple<std::string, od::BBoxColor>> AssignColourToLabel(const std::string& pathToLabelFile)
+std::vector<std::tuple<std::string, common::BBoxColor>> AssignColourToLabel(const std::string& pathToLabelFile)
 {
     std::ifstream in(pathToLabelFile);
-    std::vector<std::tuple<std::string, od::BBoxColor>> labels;
+    std::vector<std::tuple<std::string, common::BBoxColor>> labels;
 
     std::string str;
     std::default_random_engine generator;
@@ -47,7 +71,7 @@
     {
         if(!str.empty())
         {
-            od::BBoxColor c{
+            common::BBoxColor c{
                 .colorCode = std::make_tuple(distribution(generator),
                                              distribution(generator),
                                              distribution(generator))
@@ -60,13 +84,13 @@
     return labels;
 }
 
-std::tuple<std::unique_ptr<od::IFrameReader<cv::Mat>>,
-           std::unique_ptr<od::IFrameOutput<cv::Mat>>>
+std::tuple<std::unique_ptr<common::IFrameReader<cv::Mat>>,
+           std::unique_ptr<common::IFrameOutput<cv::Mat>>>
            GetFrameSourceAndSink(const std::map<std::string, std::string>& options) {
 
-    std::unique_ptr<od::IFrameReader<cv::Mat>> readerPtr;
+    std::unique_ptr<common::IFrameReader<cv::Mat>> readerPtr;
 
-    std::unique_ptr<od::CvVideoFrameReader> reader = std::make_unique<od::CvVideoFrameReader>();
+    std::unique_ptr<common::CvVideoFrameReader> reader = std::make_unique<common::CvVideoFrameReader>();
     reader->Init(GetSpecifiedOption(options, VIDEO_FILE_PATH));
 
     auto enc = reader->GetSourceEncodingInt();
@@ -75,7 +99,7 @@
     auto h = reader->GetSourceHeight();
     if (!reader->ConvertToRGB())
     {
-        readerPtr = std::move(std::make_unique<od::CvVideoFrameReaderRgbWrapper>(std::move(reader)));
+        readerPtr = std::move(std::make_unique<common::CvVideoFrameReaderRgbWrapper>(std::move(reader)));
     }
     else
     {
@@ -85,14 +109,14 @@
     if(CheckOptionSpecified(options, OUTPUT_VIDEO_FILE_PATH))
     {
         std::string outputVideo = GetSpecifiedOption(options, OUTPUT_VIDEO_FILE_PATH);
-        auto writer = std::make_unique<od::CvVideoFileWriter>();
+        auto writer = std::make_unique<common::CvVideoFileWriter>();
         writer->Init(outputVideo, enc, fps, w, h);
 
         return std::make_tuple<>(std::move(readerPtr), std::move(writer));
     }
     else
     {
-        auto writer = std::make_unique<od::CvWindowOutput>();
+        auto writer = std::make_unique<common::CvWindowOutput>();
         writer->Init("Processed Video");
         return std::make_tuple<>(std::move(readerPtr), std::move(writer));
     }
@@ -109,7 +133,7 @@
     }
 
     // Create the network options
-    od::ODPipelineOptions pipelineOptions;
+    common::PipelineOptions pipelineOptions;
     pipelineOptions.m_ModelFilePath = GetSpecifiedOption(options, MODEL_FILE_PATH);
     pipelineOptions.m_ModelName = GetSpecifiedOption(options, MODEL_NAME);
 
@@ -127,8 +151,8 @@
     od::IPipelinePtr objectDetectionPipeline = od::CreatePipeline(pipelineOptions);
 
     auto inputAndOutput = GetFrameSourceAndSink(options);
-    std::unique_ptr<od::IFrameReader<cv::Mat>> reader = std::move(std::get<0>(inputAndOutput));
-    std::unique_ptr<od::IFrameOutput<cv::Mat>> sink = std::move(std::get<1>(inputAndOutput));
+    std::unique_ptr<common::IFrameReader<cv::Mat>> reader = std::move(std::get<0>(inputAndOutput));
+    std::unique_ptr<common::IFrameOutput<cv::Mat>> sink = std::move(std::get<1>(inputAndOutput));
 
     if (!sink->IsReady())
     {
@@ -136,7 +160,7 @@
         return 1;
     }
 
-    od::InferenceResults results;
+    common::InferenceResults<float> results;
 
     std::shared_ptr<cv::Mat> frame = reader->ReadFrame();
 
diff --git a/samples/ObjectDetection/src/NetworkPipeline.cpp b/samples/ObjectDetection/src/ObjectDetectionPipeline.cpp
similarity index 82%
rename from samples/ObjectDetection/src/NetworkPipeline.cpp
rename to samples/ObjectDetection/src/ObjectDetectionPipeline.cpp
index 7f05882..077caa4 100644
--- a/samples/ObjectDetection/src/NetworkPipeline.cpp
+++ b/samples/ObjectDetection/src/ObjectDetectionPipeline.cpp
@@ -3,23 +3,23 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include "NetworkPipeline.hpp"
+#include "ObjectDetectionPipeline.hpp"
 #include "ImageUtils.hpp"
 
 namespace od
 {
 
-ObjDetectionPipeline::ObjDetectionPipeline(std::unique_ptr<ArmnnNetworkExecutor> executor,
+ObjDetectionPipeline::ObjDetectionPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
                                            std::unique_ptr<IDetectionResultDecoder> decoder) :
         m_executor(std::move(executor)),
         m_decoder(std::move(decoder)){}
 
-void od::ObjDetectionPipeline::Inference(const cv::Mat& processed, InferenceResults& result)
+void od::ObjDetectionPipeline::Inference(const cv::Mat& processed, common::InferenceResults<float>& result)
 {
     m_executor->Run(processed.data, processed.total() * processed.elemSize(), result);
 }
 
-void ObjDetectionPipeline::PostProcessing(InferenceResults& inferenceResult,
+void ObjDetectionPipeline::PostProcessing(common::InferenceResults<float>& inferenceResult,
         const std::function<void (DetectedObjects)>& callback)
 {
     DetectedObjects detections = m_decoder->Decode(inferenceResult, m_inputImageSize,
@@ -37,7 +37,7 @@
     ResizeWithPad(frame, processed, m_processedFrame, m_executor->GetImageAspectRatio());
 }
 
-MobileNetSSDv1::MobileNetSSDv1(std::unique_ptr<ArmnnNetworkExecutor> executor,
+MobileNetSSDv1::MobileNetSSDv1(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
                                float objectThreshold) :
         ObjDetectionPipeline(std::move(executor),
                              std::make_unique<SSDResultDecoder>(objectThreshold))
@@ -53,7 +53,7 @@
     }
 }
 
-YoloV3Tiny::YoloV3Tiny(std::unique_ptr<ArmnnNetworkExecutor> executor,
+YoloV3Tiny::YoloV3Tiny(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
                        float NMSThreshold, float ClsThreshold, float ObjectThreshold) :
         ObjDetectionPipeline(std::move(executor),
                              std::move(std::make_unique<YoloResultDecoder>(NMSThreshold,
@@ -70,9 +70,9 @@
     }
 }
 
-IPipelinePtr CreatePipeline(od::ODPipelineOptions& config)
+IPipelinePtr CreatePipeline(common::PipelineOptions& config)
 {
-    auto executor = std::make_unique<od::ArmnnNetworkExecutor>(config.m_ModelFilePath, config.m_backends);
+    auto executor = std::make_unique<common::ArmnnNetworkExecutor<float>>(config.m_ModelFilePath, config.m_backends);
 
     if (config.m_ModelName == "SSD_MOBILE")
     {
diff --git a/samples/ObjectDetection/src/SSDResultDecoder.cpp b/samples/ObjectDetection/src/SSDResultDecoder.cpp
index a331921..6dfd1ab 100644
--- a/samples/ObjectDetection/src/SSDResultDecoder.cpp
+++ b/samples/ObjectDetection/src/SSDResultDecoder.cpp
@@ -12,9 +12,9 @@
 namespace od
 {
 
-DetectedObjects SSDResultDecoder::Decode(const InferenceResults& networkResults,
-    const Size& outputFrameSize,
-    const Size& resizedFrameSize,
+DetectedObjects SSDResultDecoder::Decode(const common::InferenceResults<float>& networkResults,
+    const common::Size& outputFrameSize,
+    const common::Size& resizedFrameSize,
     const std::vector<std::string>& labels)
 {
     // SSD network outputs 4 tensors: bounding boxes, labels, probabilities, number of detections.
diff --git a/samples/ObjectDetection/src/YoloResultDecoder.cpp b/samples/ObjectDetection/src/YoloResultDecoder.cpp
index ffbf7cb..f177802 100644
--- a/samples/ObjectDetection/src/YoloResultDecoder.cpp
+++ b/samples/ObjectDetection/src/YoloResultDecoder.cpp
@@ -13,9 +13,9 @@
 namespace od
 {
 
-DetectedObjects YoloResultDecoder::Decode(const InferenceResults& networkResults,
-                                         const Size& outputFrameSize,
-                                         const Size& resizedFrameSize,
+DetectedObjects YoloResultDecoder::Decode(const common::InferenceResults<float>& networkResults,
+                                         const common::Size& outputFrameSize,
+                                         const common::Size& resizedFrameSize,
                                          const std::vector<std::string>& labels)
 {
 
@@ -33,7 +33,7 @@
     DetectedObjects detectedObjects;
     DetectedObjects resultsAfterNMS;
 
-    for (const InferenceResult& result : networkResults)
+    for (const common::InferenceResult<float>& result : networkResults)
     {
         for (unsigned int i = 0; i < m_numBoxes; ++i)
         {
diff --git a/samples/ObjectDetection/test/FrameReaderTest.cpp b/samples/ObjectDetection/test/FrameReaderTest.cpp
index a4bda22..a02fa7f 100644
--- a/samples/ObjectDetection/test/FrameReaderTest.cpp
+++ b/samples/ObjectDetection/test/FrameReaderTest.cpp
@@ -20,7 +20,7 @@
         std::string file =  testResources + "/" + "Megamind.avi";
         WHEN("Frame reader is initialised") {
 
-            od::CvVideoFrameReader reader;
+            common::CvVideoFrameReader reader;
             THEN("no exception is thrown") {
                 reader.Init(file);
 
@@ -92,7 +92,7 @@
 
         WHEN("Frame reader is initialised") {
 
-            od::CvVideoFrameReader reader;
+            common::CvVideoFrameReader reader;
 
             THEN("exception is thrown") {
                 REQUIRE_THROWS(reader.Init(file));
diff --git a/samples/ObjectDetection/test/ImageUtilsTest.cpp b/samples/ObjectDetection/test/ImageUtilsTest.cpp
index e486ae1..4490cff 100644
--- a/samples/ObjectDetection/test/ImageUtilsTest.cpp
+++ b/samples/ObjectDetection/test/ImageUtilsTest.cpp
@@ -96,9 +96,9 @@
 
     std::string testResources = TEST_RESOURCE_DIR;
     REQUIRE(testResources != "");
-    std::vector<std::tuple<std::string, od::BBoxColor>> labels;
+    std::vector<std::tuple<std::string, common::BBoxColor>> labels;
 
-    od::BBoxColor c
+    common::BBoxColor c
     {
         .colorCode = std::make_tuple (0, 0, 255)
     };
diff --git a/samples/ObjectDetection/test/PipelineTest.cpp b/samples/ObjectDetection/test/PipelineTest.cpp
index 289f44f..bc5824e 100644
--- a/samples/ObjectDetection/test/PipelineTest.cpp
+++ b/samples/ObjectDetection/test/PipelineTest.cpp
@@ -4,7 +4,7 @@
 //
 #include <catch.hpp>
 #include <opencv2/opencv.hpp>
-#include <NetworkPipeline.hpp>
+#include "ObjectDetectionPipeline.hpp"
 #include "Types.hpp"
 
 static std::string GetResourceFilePath(const std::string& filename)
@@ -32,14 +32,14 @@
     std::string testResources = TEST_RESOURCE_DIR;
     REQUIRE(testResources != "");
     // Create the network options
-    od::ODPipelineOptions options;
+    common::PipelineOptions options;
     options.m_ModelFilePath = GetResourceFilePath("detect.tflite");
     options.m_ModelName = "SSD_MOBILE";
     options.m_backends = {"CpuAcc", "CpuRef"};
 
     od::IPipelinePtr objectDetectionPipeline = od::CreatePipeline(options);
 
-    od::InferenceResults results;
+    common::InferenceResults<float> results;
     cv::Mat processed;
     cv::Mat inputFrame = cv::imread(GetResourceFilePath("basketball1.png"), cv::IMREAD_COLOR);
     cv::cvtColor(inputFrame, inputFrame, cv::COLOR_BGR2RGB);