MLECO-2946: Draw boxes directly on the LCD

* No longer copy the source image
* Boxes are drawn directly to LCD rather than on source image
* Change C-style casts to static casts

Signed-off-by: Richard Burton <richard.burton@arm.com>
Change-Id: Ib8e926cb1a87bc2c40424eb5aace40170c526f1d
diff --git a/source/hal/components/lcd_mps3/glcd_mps3.c b/source/hal/components/lcd_mps3/glcd_mps3.c
index 08d4c5e..55a6575 100644
--- a/source/hal/components/lcd_mps3/glcd_mps3.c
+++ b/source/hal/components/lcd_mps3/glcd_mps3.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -283,7 +283,7 @@
     wr_dat_stop();
 }
 
-void GLCD_Image(void *data, const uint32_t width,
+void GLCD_Image(const void *data, const uint32_t width,
     const uint32_t height, const uint32_t channels,
     const uint32_t pos_x, const uint32_t pos_y,
     const uint32_t downsample_factor)
diff --git a/source/hal/components/lcd_mps3/include/glcd_mps3.h b/source/hal/components/lcd_mps3/include/glcd_mps3.h
index c2810c0..313d4ca 100644
--- a/source/hal/components/lcd_mps3/include/glcd_mps3.h
+++ b/source/hal/components/lcd_mps3/include/glcd_mps3.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -182,7 +182,7 @@
  * @param[in]  downsample_factor    Factor by which the image
  *                                  is downsampled by.
  */
-void GLCD_Image(void *data, const uint32_t width,
+void GLCD_Image(const void *data, const uint32_t width,
                const uint32_t height, const uint32_t channels,
                const uint32_t pos_x, const uint32_t pos_y,
                const uint32_t downsample_factor);
diff --git a/source/hal/include/data_psn.h b/source/hal/include/data_psn.h
index 8c14c77..05d7649 100644
--- a/source/hal/include/data_psn.h
+++ b/source/hal/include/data_psn.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -31,7 +31,7 @@
     int (* system_init)(void);  /**< pointer to init function */
 
     /** Pointer to the image presentation function */
-    int (* present_data_image)(uint8_t *data, const uint32_t width,
+    int (* present_data_image)(const uint8_t *data, const uint32_t width,
         const uint32_t height, const uint32_t channels,
         const uint32_t pos_x, const uint32_t pos_y,
         const uint32_t downsample_factor);
diff --git a/source/hal/profiles/bare-metal/data_presentation/lcd/include/lcd_img.h b/source/hal/profiles/bare-metal/data_presentation/lcd/include/lcd_img.h
index e4ad791..b447767 100644
--- a/source/hal/profiles/bare-metal/data_presentation/lcd/include/lcd_img.h
+++ b/source/hal/profiles/bare-metal/data_presentation/lcd/include/lcd_img.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -40,7 +40,7 @@
  *                                  downsampled.
  * @return      0 if successful, non-zero otherwise.
  **/
-int lcd_display_image(uint8_t* data, const uint32_t width,
+int lcd_display_image(const uint8_t* data, const uint32_t width,
     const uint32_t height, const uint32_t channels,
     const uint32_t pos_x, const uint32_t pos_y,
     const uint32_t downsample_factor);
diff --git a/source/hal/profiles/bare-metal/data_presentation/lcd/lcd_img.c b/source/hal/profiles/bare-metal/data_presentation/lcd/lcd_img.c
index f03566f..7064396 100644
--- a/source/hal/profiles/bare-metal/data_presentation/lcd/lcd_img.c
+++ b/source/hal/profiles/bare-metal/data_presentation/lcd/lcd_img.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -60,7 +60,7 @@
     return show_title();
 }
 
-int lcd_display_image(uint8_t* data, const uint32_t width,
+int lcd_display_image(const uint8_t* data, const uint32_t width,
     const uint32_t height, const uint32_t channels,
     const uint32_t pos_x, const uint32_t pos_y,
     const uint32_t downsample_factor)
diff --git a/source/hal/profiles/native/data_presentation/log/include/log.h b/source/hal/profiles/native/data_presentation/log/include/log.h
index 9b9928f..796d0ef 100644
--- a/source/hal/profiles/native/data_presentation/log/include/log.h
+++ b/source/hal/profiles/native/data_presentation/log/include/log.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -39,7 +39,7 @@
  * @return      0 if successful, non-zero otherwise.
  **/
 
-int log_display_image(uint8_t* data, const uint32_t width,
+int log_display_image(const uint8_t* data, const uint32_t width,
                       const uint32_t height, const uint32_t channels,
                       const uint32_t pos_x, const uint32_t pos_y,
                       const uint32_t downsample_factor);
diff --git a/source/hal/profiles/native/data_presentation/log/log.c b/source/hal/profiles/native/data_presentation/log/log.c
index 1673af1..e37b4ca 100644
--- a/source/hal/profiles/native/data_presentation/log/log.c
+++ b/source/hal/profiles/native/data_presentation/log/log.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -32,7 +32,7 @@
     return 0;
 }
 
-int log_display_image(uint8_t* data, const uint32_t width,
+int log_display_image(const uint8_t* data, const uint32_t width,
                       const uint32_t height, const uint32_t channels,
                       const uint32_t pos_x, const uint32_t pos_y,
                       const uint32_t downsample_factor)
diff --git a/source/use_case/object_detection/include/DetectorPostProcessing.hpp b/source/use_case/object_detection/include/DetectorPostProcessing.hpp
index 5393f89..cdb14f5 100644
--- a/source/use_case/object_detection/include/DetectorPostProcessing.hpp
+++ b/source/use_case/object_detection/include/DetectorPostProcessing.hpp
@@ -59,21 +59,19 @@
          * @param[in]   numClasses    Number of classes.
          * @param[in]   topN          Top N for each class.
          **/
-        DetectorPostprocessing(float threshold = 0.5f,
-                                float nms = 0.45f,
-                                int numClasses = 1,
-                                int topN = 0);
+        explicit DetectorPostprocessing(float threshold = 0.5f,
+                                        float nms = 0.45f,
+                                        int numClasses = 1,
+                                        int topN = 0);
 
         /**
-         * @brief       Post processing part of Yolo object detection CNN.
-         * @param[in]   imgIn        Pointer to the input image,detection bounding boxes drown on it.
+         * @brief       Post processing part of YOLO object detection CNN.
          * @param[in]   imgRows      Number of rows in the input image.
          * @param[in]   imgCols      Number of columns in the input image.
          * @param[in]   modelOutput  Output tensors after CNN invoked.
          * @param[out]  resultsOut   Vector of detected results.
          **/
-        void RunPostProcessing(uint8_t* imgIn,
-                               uint32_t imgRows,
+        void RunPostProcessing(uint32_t imgRows,
                                uint32_t imgCols,
                                TfLiteTensor* modelOutput0,
                                TfLiteTensor* modelOutput1,
diff --git a/source/use_case/object_detection/src/DetectorPostProcessing.cc b/source/use_case/object_detection/src/DetectorPostProcessing.cc
index e97e6b3..a890c9e 100644
--- a/source/use_case/object_detection/src/DetectorPostProcessing.cc
+++ b/source/use_case/object_detection/src/DetectorPostProcessing.cc
@@ -17,7 +17,6 @@
 #include "DetectorPostProcessing.hpp"
 #include "PlatformMath.hpp"
 
-#include <algorithm>
 #include <cmath>
 
 namespace arm {
@@ -36,7 +35,6 @@
 {}
 
 void DetectorPostprocessing::RunPostProcessing(
-    uint8_t* imgIn,
     uint32_t imgRows,
     uint32_t imgCols,
     TfLiteTensor* modelOutput0,
@@ -117,9 +115,6 @@
                 tmpResult.m_h = boxHeight;
 
                 resultsOut.push_back(tmpResult);
-
-                /* TODO: Instead of draw on the image, return the boxes and draw on the LCD */
-                DrawBoxOnImage(imgIn, originalImageWidth, originalImageHeight, boxX, boxY, boxWidth, boxHeight);;
             }
         }
     }
@@ -159,7 +154,10 @@
 
                     /* Objectness score */
                     int bbox_obj_offset = h * width * channel + w * channel + anc * (numClasses + 5) + 4;
-                    float objectness = math::MathUtils::SigmoidF32(((float)net.branches[i].modelOutput[bbox_obj_offset] - net.branches[i].zeroPoint) * net.branches[i].scale);
+                    float objectness = math::MathUtils::SigmoidF32(
+                            (static_cast<float>(net.branches[i].modelOutput[bbox_obj_offset])
+                            - net.branches[i].zeroPoint
+                            ) * net.branches[i].scale);
 
                     if(objectness > threshold) {
                         image::Detection det;
@@ -171,11 +169,10 @@
                         int bbox_h_offset = bbox_x_offset + 3;
                         int bbox_scores_offset = bbox_x_offset + 5;
 
-                        det.bbox.x = ((float)net.branches[i].modelOutput[bbox_x_offset] - net.branches[i].zeroPoint) * net.branches[i].scale;
-                        det.bbox.y = ((float)net.branches[i].modelOutput[bbox_y_offset] - net.branches[i].zeroPoint) * net.branches[i].scale;
-                        det.bbox.w = ((float)net.branches[i].modelOutput[bbox_w_offset] - net.branches[i].zeroPoint) * net.branches[i].scale;
-                        det.bbox.h = ((float)net.branches[i].modelOutput[bbox_h_offset] - net.branches[i].zeroPoint) * net.branches[i].scale;
-
+                        det.bbox.x = (static_cast<float>(net.branches[i].modelOutput[bbox_x_offset]) - net.branches[i].zeroPoint) * net.branches[i].scale;
+                        det.bbox.y = (static_cast<float>(net.branches[i].modelOutput[bbox_y_offset]) - net.branches[i].zeroPoint) * net.branches[i].scale;
+                        det.bbox.w = (static_cast<float>(net.branches[i].modelOutput[bbox_w_offset]) - net.branches[i].zeroPoint) * net.branches[i].scale;
+                        det.bbox.h = (static_cast<float>(net.branches[i].modelOutput[bbox_h_offset]) - net.branches[i].zeroPoint) * net.branches[i].scale;
 
                         float bbox_x, bbox_y;
 
@@ -185,11 +182,14 @@
                         det.bbox.x = (bbox_x + w) / width;
                         det.bbox.y = (bbox_y + h) / height;
 
-                        det.bbox.w = exp(det.bbox.w) * net.branches[i].anchor[anc*2] / net.inputWidth;
-                        det.bbox.h = exp(det.bbox.h) * net.branches[i].anchor[anc*2+1] / net.inputHeight;
+                        det.bbox.w = std::exp(det.bbox.w) * net.branches[i].anchor[anc*2] / net.inputWidth;
+                        det.bbox.h = std::exp(det.bbox.h) * net.branches[i].anchor[anc*2+1] / net.inputHeight;
 
                         for (int s = 0; s < numClasses; s++) {
-                            float sig = math::MathUtils::SigmoidF32(((float)net.branches[i].modelOutput[bbox_scores_offset + s] - net.branches[i].zeroPoint) * net.branches[i].scale)*objectness;
+                            float sig = math::MathUtils::SigmoidF32(
+                                    (static_cast<float>(net.branches[i].modelOutput[bbox_scores_offset + s]) -
+                                    net.branches[i].zeroPoint) * net.branches[i].scale
+                                    ) * objectness;
                             det.prob.emplace_back((sig > threshold) ? sig : 0);
                         }
 
@@ -218,53 +218,6 @@
         num -=1;
 }
 
-void DetectorPostprocessing::DrawBoxOnImage(uint8_t* imgIn, int imWidth, int imHeight, int boxX,int boxY, int boxWidth, int boxHeight)
-{
-    auto CheckAndFixOffset = [](int im_width,int im_height,int& offset) {
-        if ( (offset) >= im_width*im_height*channelsImageDisplayed) {
-            offset = im_width * im_height * channelsImageDisplayed -1;
-        }
-        else if ( (offset) < 0) {
-            offset = 0;
-        }
-    };
-
-    /* Consistency checks */
-    if (!imgIn) {
-        return;
-    }
-
-    int offset=0;
-    for (int i=0; i < boxWidth; i++) {
-        /* Draw two horizontal lines */
-        for (int line=0; line < 2; line++) {
-            /*top*/
-            offset =(i + (boxY + line)*imWidth + boxX) * channelsImageDisplayed; /* channelsImageDisplayed for rgb or grayscale*/
-            CheckAndFixOffset(imWidth,imHeight,offset);
-            imgIn[offset] = 0xFF;
-            /*bottom*/
-            offset = (i + (boxY + boxHeight - line)*imWidth + boxX) * channelsImageDisplayed;
-            CheckAndFixOffset(imWidth,imHeight,offset);
-            imgIn[offset] = 0xFF;
-        }
-    }
-
-    for (int i=0; i < boxHeight; i++) {
-        /* Draw two vertical lines */
-        for (int line=0; line < 2; line++) {
-            /*left*/
-            offset = ((i + boxY)*imWidth + boxX + line)*channelsImageDisplayed;
-            CheckAndFixOffset(imWidth,imHeight,offset);
-            imgIn[offset] = 0xFF;
-            /*right*/
-            offset = ((i + boxY)*imWidth + boxX + boxWidth - line)*channelsImageDisplayed;
-            CheckAndFixOffset(imWidth,imHeight, offset);
-            imgIn[offset] = 0xFF;
-        }
-    }
-
-}
-
 } /* namespace object_detection */
 } /* namespace app */
 } /* namespace arm */
diff --git a/source/use_case/object_detection/src/UseCaseHandler.cc b/source/use_case/object_detection/src/UseCaseHandler.cc
index 620ce6c..257da4f 100644
--- a/source/use_case/object_detection/src/UseCaseHandler.cc
+++ b/source/use_case/object_detection/src/UseCaseHandler.cc
@@ -35,7 +35,21 @@
      * @return          true if successful, false otherwise.
      **/
     static bool PresentInferenceResult(hal_platform& platform,
-                                    const std::vector<arm::app::object_detection::DetectionResult>& results);
+                                       const std::vector<arm::app::object_detection::DetectionResult>& results);
+
+    /**
+     * @brief           Draw boxes directly on the LCD for all detected objects.
+     * @param[in]       platform           Reference to the hal platform object.
+     * @param[in]       results            Vector of detection results to be displayed.
+     * @param[in]       imgStartX          X coordinate where the image starts on the LCD.
+     * @param[in]       imgStartY          Y coordinate where the image starts on the LCD.
+     * @param[in]       imgDownscaleFactor Factor by which the image has been downscaled on the LCD.
+     **/
+    static void DrawDetectionBoxes(hal_platform& platform,
+                                   const std::vector<arm::app::object_detection::DetectionResult>& results,
+                                   uint32_t imgStartX,
+                                   uint32_t imgStartY,
+                                   uint32_t imgDownscaleFactor);
 
     /* Object detection classification handler. */
     bool ObjectDetectionHandler(ApplicationContext& ctx, uint32_t imgIndex, bool runAll)
@@ -97,18 +111,12 @@
             const size_t copySz = inputTensor->bytes < IMAGE_DATA_SIZE ?
                                 inputTensor->bytes : IMAGE_DATA_SIZE;
 
-            /* Copy of the image used for presentation, original images are read-only */
-            std::vector<uint8_t> g_image_buffer(nCols*nRows*channelsImageDisplayed);
-            if (nPresentationChannels == 3) {
-                memcpy(g_image_buffer.data(),curr_image, nCols * nRows * channelsImageDisplayed);
-            } else {
-                image::RgbToGrayscale(curr_image, g_image_buffer.data(), nCols * nRows);
-            }
+            /* Convert to grayscale and populate the input tensor. */
             image::RgbToGrayscale(curr_image, dstPtr, copySz);
 
-            /* Display this image on the LCD. */
+            /* Display original image on the LCD. */
             platform.data_psn->present_data_image(
-                g_image_buffer.data(),
+                curr_image,
                 nCols, nRows, nPresentationChannels,
                 dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);
 
@@ -119,7 +127,7 @@
 
             /* Display message on the LCD - inference running. */
             platform.data_psn->present_data_text(str_inf.c_str(), str_inf.size(),
-                                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+                                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);
 
             /* Run inference over this image. */
             info("Running inference on image %" PRIu32 " => %s\n", ctx.Get<uint32_t>("imgIndex"),
@@ -132,24 +140,21 @@
             /* Erase. */
             str_inf = std::string(str_inf.size(), ' ');
             platform.data_psn->present_data_text(str_inf.c_str(), str_inf.size(),
-                                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+                                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);
 
             /* Detector post-processing*/
             std::vector<object_detection::DetectionResult> results;
             TfLiteTensor* modelOutput0 = model.GetOutputTensor(0);
             TfLiteTensor* modelOutput1 = model.GetOutputTensor(1);
             postp.RunPostProcessing(
-                g_image_buffer.data(),
                 nRows,
                 nCols,
                 modelOutput0,
                 modelOutput1,
                 results);
 
-            platform.data_psn->present_data_image(
-                g_image_buffer.data(),
-                nCols, nRows, nPresentationChannels,
-                dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);
+            /* Draw boxes. */
+            DrawDetectionBoxes(platform, results, dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);
 
 #if VERIFY_TEST_OUTPUT
             arm::app::DumpTensor(modelOutput0);
@@ -188,5 +193,34 @@
         return true;
     }
 
+    static void DrawDetectionBoxes(hal_platform& platform,
+                                   const std::vector<arm::app::object_detection::DetectionResult>& results,
+                                   uint32_t imgStartX,
+                                   uint32_t imgStartY,
+                                   uint32_t imgDownscaleFactor)
+    {
+        uint32_t lineThickness = 1;
+
+        for (const auto& result: results) {
+            /* Top line. */
+            platform.data_psn->present_box(imgStartX + result.m_x0/imgDownscaleFactor,
+                    imgStartY + result.m_y0/imgDownscaleFactor,
+                    result.m_w/imgDownscaleFactor, lineThickness, COLOR_GREEN);
+            /* Bottom line. */
+            platform.data_psn->present_box(imgStartX + result.m_x0/imgDownscaleFactor,
+                    imgStartY + (result.m_y0 + result.m_h)/imgDownscaleFactor - lineThickness,
+                    result.m_w/imgDownscaleFactor, lineThickness, COLOR_GREEN);
+
+            /* Left line. */
+            platform.data_psn->present_box(imgStartX + result.m_x0/imgDownscaleFactor,
+                    imgStartY + result.m_y0/imgDownscaleFactor,
+                    lineThickness, result.m_h/imgDownscaleFactor, COLOR_GREEN);
+            /* Right line. */
+            platform.data_psn->present_box(imgStartX + (result.m_x0 + result.m_w)/imgDownscaleFactor - lineThickness,
+                    imgStartY + result.m_y0/imgDownscaleFactor,
+                    lineThickness, result.m_h/imgDownscaleFactor, COLOR_GREEN);
+        }
+    }
+
 } /* namespace app */
 } /* namespace arm */
diff --git a/tests/use_case/object_detection/InferenceTestYoloFastest.cc b/tests/use_case/object_detection/InferenceTestYoloFastest.cc
index b3bd408..8ef012d 100644
--- a/tests/use_case/object_detection/InferenceTestYoloFastest.cc
+++ b/tests/use_case/object_detection/InferenceTestYoloFastest.cc
@@ -96,7 +96,6 @@
 
     arm::app::object_detection::DetectorPostprocessing postp;
     postp.RunPostProcessing(
-        nullptr,
         nRows,
         nCols,
         output_arr[0],