MLECO-2492 Add CPP OD example with TFLITE-ArmnnDelegate

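The example can be built against the ArmNN TfLite delegate
(libarmnnDelegate.so) instead of the ArmNN TfLite parser
(libarmnnTfLiteParser.so). The delegate path is selected at configure
time with the new USE_ARMNN_DELEGATE CMake option, e.g. (a minimal
sketch of the configure step):

    cmake .. -DUSE_ARMNN_DELEGATE=1

Also adds a small Profiling utility under samples/common/include/Utils
to time network setup and inference; profiling is disabled by default.
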
Signed-off-by: Dvir Markovich <dvir.markovich@arm.com>
Change-Id: If412c15ba49abe8370a570260b0a8ed8de305b7c
diff --git a/samples/common/cmake/find_armnn.cmake b/samples/common/cmake/find_armnn.cmake
index 289e912..35f87eb 100644
--- a/samples/common/cmake/find_armnn.cmake
+++ b/samples/common/cmake/find_armnn.cmake
@@ -2,8 +2,13 @@
 # SPDX-License-Identifier: MIT
 # Search for ArmNN built libraries in user-provided path first, then current repository, then system
 
-set(ARMNN_LIB_NAMES "libarmnn.so"
-    "libarmnnTfLiteParser.so")
+if( USE_ARMNN_DELEGATE )
+    set(ARMNN_LIB_NAMES "libarmnn.so"
+        "libarmnnDelegate.so")
+else()
+    set(ARMNN_LIB_NAMES "libarmnn.so"
+        "libarmnnTfLiteParser.so")
+endif()
 
 set(ARMNN_LIBS "")
 
@@ -26,7 +31,13 @@
         list(APPEND ARMNN_LIBS ${ARMNN_${armnn_lib}})
         get_filename_component(LIB_DIR ${ARMNN_${armnn_lib}} DIRECTORY)
         get_filename_component(LIB_PARENT_DIR ${LIB_DIR} DIRECTORY)
-        set(ARMNN_INCLUDE_DIR ${LIB_PARENT_DIR}/include)
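+        # Delegate builds also need the delegate headers, assumed to sit next to the built ArmNN libraries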
+        if( USE_ARMNN_DELEGATE )
+            set(ARMNN_INCLUDE_DIR ${LIB_PARENT_DIR}/include
+                ${LIB_PARENT_DIR}/../delegate/include
+                ${LIB_PARENT_DIR}/../delegate/src)
+        else()
+            set(ARMNN_INCLUDE_DIR ${LIB_PARENT_DIR}/include)
+        endif()
     endif()
 endforeach()
 
diff --git a/samples/common/cmake/find_catch.cmake b/samples/common/cmake/find_catch.cmake
index 584b807..f55654e 100644
--- a/samples/common/cmake/find_catch.cmake
+++ b/samples/common/cmake/find_catch.cmake
@@ -8,9 +8,10 @@
 file(MAKE_DIRECTORY ${TEST_TPIP_INCLUDE})
 
 ExternalProject_Add(catch2-headers
-    URL https://github.com/catchorg/Catch2/releases/download/v2.11.1/catch.hpp
+    URL https://github.com/catchorg/Catch2/releases/download/v2.13.5/catch.hpp
+    URL_HASH MD5=b43c586fe617aefdee3e480e9fa8f370
     DOWNLOAD_NO_EXTRACT 1
     CONFIGURE_COMMAND ""
     BUILD_COMMAND ${CMAKE_COMMAND} -E copy <DOWNLOAD_DIR>/catch.hpp ${TEST_TPIP_INCLUDE}
     INSTALL_COMMAND ""
-    )
\ No newline at end of file
+    )
diff --git a/samples/common/include/ArmnnUtils/ArmnnNetworkExecutor.hpp b/samples/common/include/ArmnnUtils/ArmnnNetworkExecutor.hpp
index 9f1ef54..80558d8 100644
--- a/samples/common/include/ArmnnUtils/ArmnnNetworkExecutor.hpp
+++ b/samples/common/include/ArmnnUtils/ArmnnNetworkExecutor.hpp
@@ -11,6 +11,7 @@
 #include "armnnTfLiteParser/ITfLiteParser.hpp"
 #include "armnnUtils/DataLayoutIndexed.hpp"
 #include <armnn/Logging.hpp>
+#include "Profiling.hpp"
 
 #include <string>
 #include <vector>
@@ -21,7 +22,7 @@
 * @brief Used to load in a network through ArmNN and run inference on it against a given backend.
 *
 */
-template <class Tout>
+template <typename Tout>
 class ArmnnNetworkExecutor
 {
 private:
@@ -31,7 +32,7 @@
     armnn::InputTensors     m_InputTensors;
     armnn::OutputTensors    m_OutputTensors;
     std::vector<armnnTfLiteParser::BindingPointInfo> m_outputBindingInfo;
-
+    Profiling m_profiling;
     std::vector<std::string> m_outputLayerNamesList;
 
     armnnTfLiteParser::BindingPointInfo m_inputBindingInfo;
@@ -59,7 +60,8 @@
-    *       * @param[in] backends - The list of preferred backends to run inference on
+    * @param[in] backends - The list of preferred backends to run inference on
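+    * @param[in] isProfilingEnabled - Enables timing measurements and prints (disabled by default)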
     */
     ArmnnNetworkExecutor(std::string& modelPath,
-                         std::vector<armnn::BackendId>& backends);
+                         std::vector<armnn::BackendId>& backends,
+                         bool isProfilingEnabled = false);
 
     /**
     * @brief Returns the aspect ratio of the associated model in the order of width, height.
@@ -87,12 +89,15 @@
 
 };
 
-template <class Tout>
+template <typename Tout>
 ArmnnNetworkExecutor<Tout>::ArmnnNetworkExecutor(std::string& modelPath,
-                                           std::vector<armnn::BackendId>& preferredBackends)
-        : m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions()))
+                                           std::vector<armnn::BackendId>& preferredBackends,
+                                           bool isProfilingEnabled):
+        m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions())),
+        m_profiling(isProfilingEnabled)
 {
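+    // Measure total network setup time (parse, optimise, load) when profiling is enabled.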
+    m_profiling.ProfilingStart();
     // Import the TensorFlow lite model.
     armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
 
@@ -151,16 +156,16 @@
             ));
         }
     }
-
+    m_profiling.ProfilingStopAndPrintUs("ArmnnNetworkExecutor time");
 }
 
-template <class Tout>
+template <typename Tout>
 armnn::DataType ArmnnNetworkExecutor<Tout>::GetInputDataType() const
 {
     return m_inputBindingInfo.second.GetDataType();
 }
 
-template <class Tout>
+template <typename Tout>
 void ArmnnNetworkExecutor<Tout>::PrepareTensors(const void* inputData, const size_t dataBytes)
 {
     assert(m_inputBindingInfo.second.GetNumBytes() >= dataBytes);
@@ -168,9 +173,10 @@
     m_InputTensors = {{ m_inputBindingInfo.first, armnn::ConstTensor(m_inputBindingInfo.second, inputData)}};
 }
 
-template <class Tout>
+template <typename Tout>
 bool ArmnnNetworkExecutor<Tout>::Run(const void* inputData, const size_t dataBytes, InferenceResults<Tout>& outResults)
 {
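+    // Time the full Run() call: tensor preparation plus inference.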
+    m_profiling.ProfilingStart();
     /* Prepare tensors if they are not ready */
     ARMNN_LOG(debug) << "Preparing tensors...";
     this->PrepareTensors(inputData, dataBytes);
@@ -190,37 +196,37 @@
 
     outResults.reserve(m_outputLayerNamesList.size());
     outResults = m_OutputBuffer;
-
+    m_profiling.ProfilingStopAndPrintUs("Total inference time");
     return (armnn::Status::Success == ret);
 }
 
-template <class Tout>
+template <typename Tout>
 float ArmnnNetworkExecutor<Tout>::GetQuantizationScale()
 {
     return this->m_inputBindingInfo.second.GetQuantizationScale();
 }
 
-template <class Tout>
+template <typename Tout>
 int ArmnnNetworkExecutor<Tout>::GetQuantizationOffset()
 {
     return this->m_inputBindingInfo.second.GetQuantizationOffset();
 }
 
-template <class Tout>
+template <typename Tout>
 float ArmnnNetworkExecutor<Tout>::GetOutputQuantizationScale(int tensorIndex)
 {
     assert(this->m_outputLayerNamesList.size() > tensorIndex);
     return this->m_outputBindingInfo[tensorIndex].second.GetQuantizationScale();
 }
 
-template <class Tout>
+template <typename Tout>
 int ArmnnNetworkExecutor<Tout>::GetOutputQuantizationOffset(int tensorIndex)
 {
     assert(this->m_outputLayerNamesList.size() > tensorIndex);
     return this->m_outputBindingInfo[tensorIndex].second.GetQuantizationOffset();
 }
 
-template <class Tout>
+template <typename Tout>
 Size ArmnnNetworkExecutor<Tout>::GetImageAspectRatio()
 {
     const auto shape = m_inputBindingInfo.second.GetShape();
diff --git a/samples/common/include/Utils/Profiling.hpp b/samples/common/include/Utils/Profiling.hpp
new file mode 100644
index 0000000..cca5632
--- /dev/null
+++ b/samples/common/include/Utils/Profiling.hpp
@@ -0,0 +1,90 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+#include <chrono>
+#include <iostream>
+#include <locale>
+#include <string>
+
+using std::chrono::duration_cast;
+using std::chrono::microseconds;
+using std::chrono::steady_clock;
+
+namespace common
+{
+/**
+ * @brief Used for measuring the performance of specific sections of code.
+ * Profiling is disabled by default and can be enabled with a parameter
+ * passed to the constructor.
+ * To measure timing, wrap the desired code section with
+ * ProfilingStart() and ProfilingStopAndPrintUs(title).
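+ *
+ * A minimal usage sketch (DetectObjects is a hypothetical stand-in for
+ * the code section being measured):
+ * @code
+ *   common::Profiling profiling(true);
+ *   profiling.ProfilingStart();
+ *   DetectObjects();
+ *   profiling.ProfilingStopAndPrintUs("Detection time");
+ * @endcode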
+ */
+class Profiling
+{
+private:
+
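+    // numpunct facet that groups digits in thousands (e.g. 1,234,567) to keep long timings readable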
+    struct group_thousands : std::numpunct<char>
+    {
+        std::string do_grouping() const override { return "\3"; }
+    };
+
+    bool mProfilingEnabled{};
+    steady_clock::time_point mStart{};
+    steady_clock::time_point mStop{};
+public:
+    Profiling() : mProfilingEnabled(false) {}
+
+    /**
+    * @brief Initializes the profiling object.
+    *
+    * @param[in] isEnabled - Enables profiling measurements and result printing.
+    */
+    explicit Profiling(bool isEnabled) : mProfilingEnabled(isEnabled) {}
+
+    /**
+    * @brief Starts the profiling measurement.
+    */
+    void ProfilingStart()
+    {
+        if (mProfilingEnabled)
+        {
+            mStart = steady_clock::now();
+        }
+    }
+
+    /**
+    * @brief Stops the profiling measurement without printing the results.
+    */
+    void ProfilingStop()
+    {
+        if (mProfilingEnabled)
+        {
+            mStop = steady_clock::now();
+        }
+    }
+
+    /**
+    * @brief Gets the measurement result in microseconds.
+    */
+    auto ProfilingGetUs()
+    {
+        return mProfilingEnabled ? duration_cast<microseconds>(mStop - mStart).count() : 0;
+    }
+
+    /**
+    * @brief Stops the profiling measurement and prints the result in microseconds.
+    */
+    void ProfilingStopAndPrintUs(const std::string& title)
+    {
+        ProfilingStop();
+        if (mProfilingEnabled)
+        {
+            std::cout.imbue(std::locale(std::cout.getloc(), new group_thousands));
+            std::cout << "Profiling: " << title << ": " << ProfilingGetUs() << " uSeconds" << std::endl;
+        }
+    }
+};
+} // namespace common
diff --git a/samples/common/include/Utils/Types.hpp b/samples/common/include/Utils/Types.hpp
index 4d1f708..184e02a 100644
--- a/samples/common/include/Utils/Types.hpp
+++ b/samples/common/include/Utils/Types.hpp
@@ -44,6 +44,7 @@
     std::string m_ModelName;
     std::string m_ModelFilePath;
     std::vector<armnn::BackendId> m_backends;
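+    // Enables timing measurements in ArmnnNetworkExecutor when set to true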
+    bool m_ProfilingEnabled = false;
 };
 
 template<typename T>