MLECO-2082: Adding visual wake word use case
MLECO-2083: Refactoring img_class and visual wake word

*Added source files for visual wake word
*Added tests
*Added docs
*Added new images for visual wake word demo
*Refactored common functions in img_class, visual wake word and other use cases

Change-Id: Ibd25854e19a5517f940a8d3086a5d4835fab89e9
Signed-off-by: Éanna Ó Catháin <eanna.ocathain@arm.com>
diff --git a/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld b/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld
index 46fc2e5..ceaff7d 100644
--- a/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld
+++ b/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld
@@ -72,7 +72,7 @@
      * instead placed on BRAM. See comment in the
      * BRAM section for details.
      **/
-    *(EXCLUDE_FILE(*all_ops_resolver.o) .text*)
+    *(EXCLUDE_FILE(*all_ops_resolver.o *hal.c.obj) .text*)
 
     KEEP(*(.init))
     KEEP(*(.fini))
@@ -221,6 +221,8 @@
      **/
     *all_ops_resolver.o (*.text*)
     . = ALIGN(4);
+    *hal.c.obj (*.text*)
+    . = ALIGN(4);
 
     __data_end__ = .;
   } > BRAM
diff --git a/source/application/hal/platforms/bare-metal/bsp/mem_layout/simple_platform.ld b/source/application/hal/platforms/bare-metal/bsp/mem_layout/simple_platform.ld
index 8bb99cd..ceaff7d 100644
--- a/source/application/hal/platforms/bare-metal/bsp/mem_layout/simple_platform.ld
+++ b/source/application/hal/platforms/bare-metal/bsp/mem_layout/simple_platform.ld
@@ -65,7 +65,14 @@
   .text.at_itcm :
   {
     KEEP(*(.vectors))
-    *(.text*)
+
+    /**
+     * All code goes here, with one exception of
+     * all_ops_resolver object file. This code
+     * instead placed on BRAM. See comment in the
+     * BRAM section for details.
+     **/
+    *(EXCLUDE_FILE(*all_ops_resolver.o *hal.c.obj) .text*)
 
     KEEP(*(.init))
     KEEP(*(.fini))
@@ -87,11 +94,6 @@
     KEEP(*(.eh_frame*))
   } > ITCM
 
-  .ARM.extab.at_itcm :
-  {
-    *(.ARM.extab* .gnu.linkonce.armextab.*)
-  } > ITCM
-
   __exidx_start = .;
   .ARM.exidx.at_itcm :
   {
@@ -208,6 +210,20 @@
     KEEP(*(.jcr*))
     . = ALIGN(4);
 
+    *(.ARM.extab* .gnu.linkonce.armextab.*)
+    . = ALIGN(4);
+
+    /**
+     * Place the all ops resolver code data here. This accounts
+     * for ~4k worth of saving on the ITCM load region. It is
+     * only designed to be included (by default) for the inference
+     * runner use case.
+     **/
+    *all_ops_resolver.o (*.text*)
+    . = ALIGN(4);
+    *hal.c.obj (*.text*)
+    . = ALIGN(4);
+
     __data_end__ = .;
   } > BRAM
 
diff --git a/source/application/main/UseCaseCommonUtils.cc b/source/application/main/UseCaseCommonUtils.cc
index 615f684..9834475 100644
--- a/source/application/main/UseCaseCommonUtils.cc
+++ b/source/application/main/UseCaseCommonUtils.cc
@@ -15,91 +15,230 @@
  * limitations under the License.
  */
 #include "UseCaseCommonUtils.hpp"
-
 #include "InputFiles.hpp"
-
 #include <inttypes.h>
 
+
+void DisplayCommonMenu()
+{
+    printf("\n\n");
+    printf("User input required\n");
+    printf("Enter option number from:\n\n");
+    printf("  %u. Classify next ifm\n", common::MENU_OPT_RUN_INF_NEXT);
+    printf("  %u. Classify ifm at chosen index\n", common::MENU_OPT_RUN_INF_CHOSEN);
+    printf("  %u. Run classification on all ifm\n", common::MENU_OPT_RUN_INF_ALL);
+    printf("  %u. Show NN model info\n", common::MENU_OPT_SHOW_MODEL_INFO);
+    printf("  %u. List ifm\n\n", common::MENU_OPT_LIST_IFM);
+    printf("  Choice: ");
+    fflush(stdout);
+}
+
+void image::ConvertImgToInt8(void* data, const size_t kMaxImageSize)
+{
+    auto* tmp_req_data = (uint8_t*) data;
+    auto* tmp_signed_req_data = (int8_t*) data;
+
+    for (size_t i = 0; i < kMaxImageSize; i++) {
+        tmp_signed_req_data[i] = (int8_t) (
+            (int32_t) (tmp_req_data[i]) - 128);
+    }
+}
+
+bool image::PresentInferenceResult(hal_platform& platform,
+                                       const std::vector<arm::app::ClassificationResult>& results)
+{
+    return PresentInferenceResult(platform, results, false);
+}
+
+bool image::PresentInferenceResult(hal_platform &platform,
+                                   const std::vector<arm::app::ClassificationResult> &results,
+                                   const time_t infTimeMs)
+{
+    return PresentInferenceResult(platform, results, true, infTimeMs);
+}
+
+
+bool image::PresentInferenceResult(hal_platform &platform,
+                                        const std::vector<arm::app::ClassificationResult> &results,
+                                        bool profilingEnabled,
+                                        const time_t infTimeMs)
+{
+    constexpr uint32_t dataPsnTxtStartX1 = 150;
+    constexpr uint32_t dataPsnTxtStartY1 = 30;
+
+    constexpr uint32_t dataPsnTxtStartX2 = 10;
+    constexpr uint32_t dataPsnTxtStartY2 = 150;
+
+    constexpr uint32_t dataPsnTxtYIncr = 16;  /* Row index increment. */
+
+    if(profilingEnabled)
+    {
+        platform.data_psn->set_text_color(COLOR_YELLOW);
+
+        /* If profiling is enabled, and the time is valid. */
+        info("Final results:\n");
+        info("Total number of inferences: 1\n");
+        if (infTimeMs)
+        {
+            std::string strInf =
+                    std::string{"Inference: "} +
+                    std::to_string(infTimeMs) +
+                    std::string{"ms"};
+            platform.data_psn->present_data_text(
+                    strInf.c_str(), strInf.size(),
+                    dataPsnTxtStartX1, dataPsnTxtStartY1, 0);
+        }
+    }
+    platform.data_psn->set_text_color(COLOR_GREEN);
+
+    /* Display each result. */
+    uint32_t rowIdx1 = dataPsnTxtStartY1 + 2 * dataPsnTxtYIncr;
+    uint32_t rowIdx2 = dataPsnTxtStartY2;
+
+    if(!profilingEnabled)
+    {
+        info("Final results:\n");
+        info("Total number of inferences: 1\n");
+    }
+
+    for (uint32_t i = 0; i < results.size(); ++i) {
+        std::string resultStr =
+                std::to_string(i + 1) + ") " +
+                std::to_string(results[i].m_labelIdx) +
+                " (" + std::to_string(results[i].m_normalisedVal) + ")";
+
+        platform.data_psn->present_data_text(
+                resultStr.c_str(), resultStr.size(),
+                dataPsnTxtStartX1, rowIdx1, 0);
+        rowIdx1 += dataPsnTxtYIncr;
+
+        resultStr = std::to_string(i + 1) + ") " + results[i].m_label;
+        platform.data_psn->present_data_text(
+                resultStr.c_str(), resultStr.size(),
+                dataPsnTxtStartX2, rowIdx2, 0);
+        rowIdx2 += dataPsnTxtYIncr;
+
+        if(profilingEnabled)
+        {
+            info("%" PRIu32 ") %" PRIu32 " (%f) -> %s\n", i, results[i].m_labelIdx,
+                 results[i].m_normalisedVal, results[i].m_label.c_str());
+        }
+        else
+        {
+            info("%" PRIu32 ") %" PRIu32 " (%f) -> %s\n", i,
+                    results[i].m_labelIdx, results[i].m_normalisedVal,
+                    results[i].m_label.c_str());
+        }
+    }
+
+    return true;
+}
+
+void IncrementAppCtxIfmIdx(arm::app::ApplicationContext& ctx, std::string useCase)
+{
+    auto curImIdx = ctx.Get<uint32_t>(useCase);
+
+    if (curImIdx + 1 >= NUMBER_OF_FILES) {
+        ctx.Set<uint32_t>(useCase, 0);
+        return;
+    }
+    ++curImIdx;
+    ctx.Set<uint32_t>(useCase, curImIdx);
+}
+
+bool SetAppCtxIfmIdx(arm::app::ApplicationContext& ctx, uint32_t idx, std::string ctxIfmName)
+{
+    if (idx >= NUMBER_OF_FILES) {
+        printf_err("Invalid idx %" PRIu32 " (expected less than %u)\n",
+                   idx, NUMBER_OF_FILES);
+        return false;
+    }
+    ctx.Set<uint32_t>(ctxIfmName, idx);
+    return true;
+}
+
+
 namespace arm {
 namespace app {
 
-    bool RunInference(arm::app::Model& model, Profiler& profiler)
-    {
-        profiler.StartProfiling("Inference");
-        bool runInf = model.RunInference();
-        profiler.StopProfiling();
 
-        return runInf;
+bool RunInference(arm::app::Model& model, Profiler& profiler)
+{
+    profiler.StartProfiling("Inference");
+    bool runInf = model.RunInference();
+    profiler.StopProfiling();
+
+    return runInf;
+}
+
+int ReadUserInputAsInt(hal_platform& platform)
+{
+    char chInput[128];
+    memset(chInput, 0, sizeof(chInput));
+
+    platform.data_acq->get_input(chInput, sizeof(chInput));
+    return atoi(chInput);
+}
+
+void DumpTensorData(const uint8_t* tensorData,
+                    size_t size,
+                    size_t lineBreakForNumElements)
+{
+    char strhex[8];
+    std::string strdump;
+
+    for (size_t i = 0; i < size; ++i) {
+        if (0 == i % lineBreakForNumElements) {
+            printf("%s\n\t", strdump.c_str());
+            strdump.clear();
+        }
+        snprintf(strhex, sizeof(strhex) - 1,
+                 "0x%02x, ", tensorData[i]);
+        strdump += std::string(strhex);
     }
 
-    int ReadUserInputAsInt(hal_platform& platform)
-    {
-        char chInput[128];
-        memset(chInput, 0, sizeof(chInput));
+    if (!strdump.empty()) {
+        printf("%s\n", strdump.c_str());
+    }
+}
 
-        platform.data_acq->get_input(chInput, sizeof(chInput));
-        return atoi(chInput);
+void DumpTensor(const TfLiteTensor* tensor, const size_t lineBreakForNumElements)
+{
+    if (!tensor) {
+        printf_err("invalid tensor\n");
+        return;
     }
 
-    void DumpTensorData(const uint8_t* tensorData,
-                        size_t size,
-                        size_t lineBreakForNumElements)
-        {
-            char strhex[8];
-            std::string strdump;
+    const uint32_t tensorSz = tensor->bytes;
+    const uint8_t* tensorData = tflite::GetTensorData<uint8_t>(tensor);
 
-            for (size_t i = 0; i < size; ++i) {
-                if (0 == i % lineBreakForNumElements) {
-                    printf("%s\n\t", strdump.c_str());
-                    strdump.clear();
-                }
-                snprintf(strhex, sizeof(strhex) - 1,
-                         "0x%02x, ", tensorData[i]);
-                strdump += std::string(strhex);
-            }
+    DumpTensorData(tensorData, tensorSz, lineBreakForNumElements);
+}
 
-            if (!strdump.empty()) {
-                printf("%s\n", strdump.c_str());
-            }
-        }
+bool ListFilesHandler(ApplicationContext& ctx)
+{
+    auto& model = ctx.Get<Model&>("model");
+    auto& platform = ctx.Get<hal_platform&>("platform");
 
-    void DumpTensor(const TfLiteTensor* tensor, const size_t lineBreakForNumElements)
-    {
-        if (!tensor) {
-            printf_err("invalid tensor\n");
-            return;
-        }
+    constexpr uint32_t dataPsnTxtStartX = 20;
+    constexpr uint32_t dataPsnTxtStartY = 40;
 
-        const uint32_t tensorSz = tensor->bytes;
-        const uint8_t* tensorData = tflite::GetTensorData<uint8_t>(tensor);
-
-        DumpTensorData(tensorData, tensorSz, lineBreakForNumElements);
+    if (!model.IsInited()) {
+        printf_err("Model is not initialised! Terminating processing.\n");
+        return false;
     }
 
-    bool ListFilesHandler(ApplicationContext& ctx)
-    {
-        auto& model = ctx.Get<Model&>("model");
-        auto& platform = ctx.Get<hal_platform&>("platform");
+    /* Clear the LCD */
+    platform.data_psn->clear(COLOR_BLACK);
 
-        constexpr uint32_t dataPsnTxtStartX = 20;
-        constexpr uint32_t dataPsnTxtStartY = 40;
-
-        if (!model.IsInited()) {
-            printf_err("Model is not initialised! Terminating processing.\n");
-            return false;
-        }
-
-        /* Clear the LCD */
-        platform.data_psn->clear(COLOR_BLACK);
-
-        /* Show the total number of embedded files. */
-        std::string strNumFiles = std::string{"Total Number of Files: "} +
-                                   std::to_string(NUMBER_OF_FILES);
-        platform.data_psn->present_data_text(strNumFiles.c_str(),
-                                             strNumFiles.size(),
-                                             dataPsnTxtStartX,
-                                             dataPsnTxtStartY,
-                                             false);
+    /* Show the total number of embedded files. */
+    std::string strNumFiles = std::string{"Total Number of Files: "} +
+                               std::to_string(NUMBER_OF_FILES);
+    platform.data_psn->present_data_text(strNumFiles.c_str(),
+                                         strNumFiles.size(),
+                                         dataPsnTxtStartX,
+                                         dataPsnTxtStartY,
+                                         false);
 
 #if NUMBER_OF_FILES > 0
         constexpr uint32_t dataPsnTxtYIncr = 16;
@@ -117,7 +256,7 @@
 #endif /* NUMBER_OF_FILES > 0 */
 
         return true;
-    }
+}
 
 } /* namespace app */
 } /* namespace arm */
\ No newline at end of file
diff --git a/source/application/main/include/UseCaseCommonUtils.hpp b/source/application/main/include/UseCaseCommonUtils.hpp
index 0af22f3..a3b606d 100644
--- a/source/application/main/include/UseCaseCommonUtils.hpp
+++ b/source/application/main/include/UseCaseCommonUtils.hpp
@@ -21,6 +21,11 @@
 #include "Model.hpp"
 #include "AppContext.hpp"
 #include "Profiler.hpp"
+#include "UseCaseHandler.hpp"       /* Handlers for different user options. */
+#include "Classifier.hpp"           /* Classifier. */
+#include "InputFiles.hpp"
+#include <inttypes.h>
+
 
 /* Helper macro to convert RGB888 to RGB565 format. */
 #define RGB888_TO_RGB565(R8,G8,B8)  ((((R8>>3) & 0x1F) << 11) |     \
@@ -31,9 +36,86 @@
 constexpr uint16_t COLOR_GREEN  = RGB888_TO_RGB565(  0, 255,  0); // 2016;
 constexpr uint16_t COLOR_YELLOW = RGB888_TO_RGB565(255, 255,  0); // 65504;
 
+
+void DisplayCommonMenu();
+
+namespace image{
+
+  /**
+  * @brief           Helper function to convert a UINT8 image to INT8 format.
+  * @param[in,out]   data            Pointer to the data start.
+  * @param[in]       kMaxImageSize   Total number of pixels in the image.
+  **/
+  void ConvertImgToInt8(void * data, size_t kMaxImageSize);
+
+  /**
+   * @brief           Presents inference results using the data presentation
+   *                  object.
+   * @param[in]       platform    Reference to the hal platform object.
+   * @param[in]       results     Vector of classification results to be displayed.
+   * @return          true if successful, false otherwise.
+   **/
+  bool PresentInferenceResult(hal_platform & platform,
+    const std::vector < arm::app::ClassificationResult > & results);
+
+
+  /**
+   * @brief           Presents inference results along with the inference time using the data presentation
+   *                  object.
+   * @param[in]       platform    Reference to the hal platform object.
+   * @param[in]       results     Vector of classification results to be displayed.
+   * @param[in]       infTimeMs   Inference time in ms.
+   * @return          true if successful, false otherwise.
+   **/
+  bool PresentInferenceResult(hal_platform & platform,
+    const std::vector < arm::app::ClassificationResult > & results,
+      const time_t infTimeMs);
+
+  /**
+  * @brief           Presents inference results along with the inference time using the data presentation
+  *                  object.
+  * @param[in]       platform    Reference to the hal platform object.
+  * @param[in]       results     Vector of classification results to be displayed.
+  * @param[in]       infTimeMs   Inference time in ms.
+  * @return          true if successful, false otherwise.
+  **/
+  bool PresentInferenceResult(hal_platform & platform,
+                              const std::vector < arm::app::ClassificationResult > & results,
+                              bool profilingEnabled,
+                              const time_t infTimeMs = 0);
+  }
+
+/**
+   * @brief           Helper function to increment current input feature vector index.
+   * @param[in,out]   ctx       Pointer to the application context object.
+   * @param[in]       useCase   Use case name
+   **/
+void IncrementAppCtxIfmIdx(arm::app::ApplicationContext& ctx, std::string useCase);
+
+/**
+   * @brief           Helper function to set the input feature map index.
+   * @param[in,out]   ctx          Pointer to the application context object.
+   * @param[in]       idx          Value to be set.
+   * @param[in]       ctxIfmName   Input Feature Map name
+   * @return          true if index is set, false otherwise.
+   **/
+bool SetAppCtxIfmIdx(arm::app::ApplicationContext& ctx, uint32_t idx, std::string ctxIfmName);
+
+
+namespace common {
+
+  enum OPCODES {
+        MENU_OPT_RUN_INF_NEXT = 1, /* Run on next vector. */
+        MENU_OPT_RUN_INF_CHOSEN, /* Run on a user provided vector index. */
+        MENU_OPT_RUN_INF_ALL, /* Run inference on all. */
+        MENU_OPT_SHOW_MODEL_INFO, /* Show model info. */
+        MENU_OPT_LIST_IFM /* List the current IFM. */
+  };
+
+}
+
 namespace arm {
 namespace app {
-
     /**
      * @brief           Run inference using given model
      *                  object. If profiling is enabled, it will log the
@@ -77,4 +159,5 @@
 } /* namespace app */
 } /* namespace arm */
 
-#endif /* USECASE_COMMON_UTILS_HPP */
\ No newline at end of file
+
+#endif /* USECASE_COMMON_UTILS_HPP */