Face detection demo from Emza Visual Sense
Signed-off-by: Michael Levit michaell@emza-vs.com

Change-Id: I7958b05b5dbe9a785e0f8a241b716c17a9ca976f
diff --git a/source/use_case/object_detection/include/DetectionResult.hpp b/source/use_case/object_detection/include/DetectionResult.hpp
new file mode 100644
index 0000000..78895f7
--- /dev/null
+++ b/source/use_case/object_detection/include/DetectionResult.hpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef DETECTION_RESULT_HPP
+#define DETECTION_RESULT_HPP
+
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief   Class representing a single detection result.
+     */
+    class DetectionResult {
+    public:
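+        /* Detection score (normalised, in the [0, 1] range) followed by the
+         * box geometry in pixels: top-left corner (m_x0, m_y0), width m_w
+         * and height m_h. */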
+        double  m_normalisedVal{0.0};
+        int     m_x0{0};
+        int     m_y0{0};
+        int     m_w{0};
+        int     m_h{0};
+
+        DetectionResult() = default;
+        ~DetectionResult() = default;
+
+        DetectionResult(double normalisedVal, int x0, int y0, int w, int h) :
+                m_normalisedVal(normalisedVal),
+                m_x0(x0),
+                m_y0(y0),
+                m_w(w),
+                m_h(h)
+        {
+        }
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* DETECTION_RESULT_HPP */
diff --git a/source/use_case/object_detection/include/DetectionUseCaseUtils.hpp b/source/use_case/object_detection/include/DetectionUseCaseUtils.hpp
new file mode 100644
index 0000000..8ef48ac
--- /dev/null
+++ b/source/use_case/object_detection/include/DetectionUseCaseUtils.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef DETECTION_USE_CASE_UTILS_HPP
+#define DETECTION_USE_CASE_UTILS_HPP
+
+#include "hal.h"
+#include "DetectionResult.hpp"
+#include "UseCaseHandler.hpp"       /* Handlers for different user options. */
+#include <inttypes.h>
+#include <vector>
+
+
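+/** @brief   Displays the object detection use-case menu on stdout. */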
+void DisplayDetectionMenu();
+
+namespace image {
+
+
+  /**
+   * @brief           Presents inference results using the data presentation
+   *                  object.
+   * @param[in]       platform    Reference to the hal platform object.
+   * @param[in]       results     Vector of detection results to be displayed.
+   * @return          true if successful, false otherwise.
+   **/
+  bool PresentInferenceResult(hal_platform& platform,
+                              const std::vector<arm::app::DetectionResult>& results);
+
+
+  /**
+   * @brief           Presents inference results along with the inference time using the data presentation
+   *                  object.
+   * @param[in]       platform    Reference to the hal platform object.
+   * @param[in]       results     Vector of detection results to be displayed.
+   * @param[in]       infTimeMs   Inference time in ms.
+   * @return          true if successful, false otherwise.
+   **/
+  bool PresentInferenceResult(hal_platform& platform,
+                              const std::vector<arm::app::DetectionResult>& results,
+                              const time_t infTimeMs);
+
+  /**
+  * @brief           Presents inference results, optionally along with the inference
+  *                  time, using the data presentation object.
+  * @param[in]       platform          Reference to the hal platform object.
+  * @param[in]       results           Vector of detection results to be displayed.
+  * @param[in]       profilingEnabled  Whether profiling information should be presented.
+  * @param[in]       infTimeMs         Inference time in ms.
+  * @return          true if successful, false otherwise.
+  **/
+  bool PresentInferenceResult(hal_platform& platform,
+                              const std::vector<arm::app::DetectionResult>& results,
+                              bool profilingEnabled,
+                              const time_t infTimeMs = 0);
+} /* namespace image */
+
+#endif /* DETECTION_USE_CASE_UTILS_HPP */
diff --git a/source/use_case/object_detection/include/DetectorPostProcessing.hpp b/source/use_case/object_detection/include/DetectorPostProcessing.hpp
new file mode 100644
index 0000000..9a8549c
--- /dev/null
+++ b/source/use_case/object_detection/include/DetectorPostProcessing.hpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef DETECTOR_POST_PROCESSING_HPP
+#define DETECTOR_POST_PROCESSING_HPP
+
+#include "UseCaseCommonUtils.hpp"
+#include "DetectionResult.hpp"
+
+namespace arm {
+namespace app {
+
+#if DISPLAY_RGB_IMAGE
+#define FORMAT_MULTIPLY_FACTOR 3
+#else
+#define FORMAT_MULTIPLY_FACTOR 1
+#endif /* DISPLAY_RGB_IMAGE */
+
+    /**
+     * @brief       Post-processing part of the YOLO object detection CNN.
+     * @param[in]   img_in        Pointer to the input image; detection bounding boxes are drawn onto it.
+     * @param[in]   model_output  Output tensors after the CNN has been invoked.
+     * @param[out]  results_out   Vector of detected results.
+     * @return      void
+     **/
+    void RunPostProcessing(uint8_t* img_in, TfLiteTensor* model_output[2], std::vector<arm::app::DetectionResult>& results_out);
+
+    /**
+     * @brief       Converts an RGB image to grayscale.
+     * @param[in]   rgb    Pointer to the RGB input image.
+     * @param[out]  gray   Pointer to the grayscale output image.
+     * @param[in]   im_w   Input image width.
+     * @param[in]   im_h   Input image height.
+     * @return      void
+     **/
+    void RgbToGrayscale(const uint8_t* rgb, uint8_t* gray, int im_w, int im_h);
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* DETECTOR_POST_PROCESSING_HPP */
diff --git a/source/use_case/object_detection/include/UseCaseHandler.hpp b/source/use_case/object_detection/include/UseCaseHandler.hpp
new file mode 100644
index 0000000..56629c8
--- /dev/null
+++ b/source/use_case/object_detection/include/UseCaseHandler.hpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef OBJ_DET_HANDLER_HPP
+#define OBJ_DET_HANDLER_HPP
+
+#include "AppContext.hpp"
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief       Handles the inference event.
+     * @param[in]   ctx        Reference to the application context.
+     * @param[in]   imgIndex   Index of the image to run object detection on.
+     * @param[in]   runAll     Flag to request detection on all the available images.
+     * @return      true or false based on execution success.
+     **/
+    bool ObjectDetectionHandler(ApplicationContext& ctx, uint32_t imgIndex, bool runAll);
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* OBJ_DET_HANDLER_HPP */
diff --git a/source/use_case/object_detection/include/YoloFastestModel.hpp b/source/use_case/object_detection/include/YoloFastestModel.hpp
new file mode 100644
index 0000000..f5709ea
--- /dev/null
+++ b/source/use_case/object_detection/include/YoloFastestModel.hpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef YOLO_FASTEST_MODEL_HPP
+#define YOLO_FASTEST_MODEL_HPP
+
+#include "Model.hpp"
+
+namespace arm {
+namespace app {
+
+    class YoloFastestModel : public Model {
+
+    public:
+        /* Indices for the expected model - based on input tensor shape */
+        static constexpr uint32_t ms_inputRowsIdx     = 1;
+        static constexpr uint32_t ms_inputColsIdx     = 2;
+        static constexpr uint32_t ms_inputChannelsIdx = 3;
+
+    protected:
+        /** @brief   Gets the reference to op resolver interface class. */
+        const tflite::MicroOpResolver& GetOpResolver() override;
+
+        /** @brief   Adds operations to the op resolver instance. */
+        bool EnlistOperations() override;
+
+        const uint8_t* ModelPointer() override;
+
+        size_t ModelSize() override;
+
+    private:
+        /* Maximum number of individual operations that can be enlisted. */
+        static constexpr int ms_maxOpCnt = 8;
+
+        /* A mutable op resolver instance. */
+        tflite::MicroMutableOpResolver<ms_maxOpCnt> m_opResolver;
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* YOLO_FASTEST_MODEL_HPP */
diff --git a/source/use_case/object_detection/src/DetectionUseCaseUtils.cc b/source/use_case/object_detection/src/DetectionUseCaseUtils.cc
new file mode 100644
index 0000000..1713c7e
--- /dev/null
+++ b/source/use_case/object_detection/src/DetectionUseCaseUtils.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DetectionUseCaseUtils.hpp"
+#include "UseCaseCommonUtils.hpp"
+#include "InputFiles.hpp"
+#include <inttypes.h>
+
+
+void DisplayDetectionMenu()
+{
+    printf("\n\n");
+    printf("User input required\n");
+    printf("Enter option number from:\n\n");
+    printf("  %u. Run detection on next ifm\n", common::MENU_OPT_RUN_INF_NEXT);
+    printf("  %u. Run detection ifm at chosen index\n", common::MENU_OPT_RUN_INF_CHOSEN);
+    printf("  %u. Run detection on all ifm\n", common::MENU_OPT_RUN_INF_ALL);
+    printf("  %u. Show NN model info\n", common::MENU_OPT_SHOW_MODEL_INFO);
+    printf("  %u. List ifm\n\n", common::MENU_OPT_LIST_IFM);
+    printf("  Choice: ");
+    fflush(stdout);
+}
+
+
+bool image::PresentInferenceResult(hal_platform& platform,
+                                   const std::vector<arm::app::DetectionResult>& results)
+{
+    return PresentInferenceResult(platform, results, false);
+}
+
+bool image::PresentInferenceResult(hal_platform &platform,
+                                   const std::vector<arm::app::DetectionResult> &results,
+                                   const time_t infTimeMs)
+{
+    return PresentInferenceResult(platform, results, true, infTimeMs);
+}
+
+
+bool image::PresentInferenceResult(hal_platform &platform,
+                                   const std::vector<arm::app::DetectionResult> &results,
+                                   bool profilingEnabled,
+                                   const time_t infTimeMs)
+{
+    constexpr uint32_t dataPsnTxtStartX1 = 150;
+    constexpr uint32_t dataPsnTxtStartY1 = 30;
+
+
+    if(profilingEnabled)
+    {
+        platform.data_psn->set_text_color(COLOR_YELLOW);
+
+        /* If profiling is enabled, and the time is valid. */
+        info("Final results:\n");
+        info("Total number of inferences: 1\n");
+        if (infTimeMs)
+        {
+            std::string strInf =
+                    std::string{"Inference: "} +
+                    std::to_string(infTimeMs) +
+                    std::string{"ms"};
+            platform.data_psn->present_data_text(
+                    strInf.c_str(), strInf.size(),
+                    dataPsnTxtStartX1, dataPsnTxtStartY1, 0);
+        }
+    }
+    platform.data_psn->set_text_color(COLOR_GREEN);
+
+    if(!profilingEnabled) {
+        info("Final results:\n");
+        info("Total number of inferences: 1\n");
+    }
+
+    for (uint32_t i = 0; i < results.size(); ++i) {
+        info("%" PRIu32 ")  (%f) -> %s {x=%d,y=%d,w=%d,h=%d}\n", i,
+             results[i].m_normalisedVal, "Detection box:",
+             results[i].m_x0, results[i].m_y0, results[i].m_w, results[i].m_h);
+    }
+
+    return true;
+}
+
+
+
diff --git a/source/use_case/object_detection/src/DetectorPostProcessing.cc b/source/use_case/object_detection/src/DetectorPostProcessing.cc
new file mode 100755
index 0000000..e781b62
--- /dev/null
+++ b/source/use_case/object_detection/src/DetectorPostProcessing.cc
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DetectorPostProcessing.hpp"
+#include <algorithm>
+#include <cmath>
+#include <stdint.h>
+#include <forward_list>
+
+
+typedef struct boxabs {
+    float left, right, top, bot;
+} boxabs;
+
+typedef struct branch {
+    int resolution;
+    int num_box;
+    float *anchor;
+    int8_t *tf_output;
+    float scale;
+    int zero_point;
+    size_t size;
+    float scale_x_y;
+} branch;
+
+typedef struct network {
+    int input_w;
+    int input_h;
+    int num_classes;
+    int num_branch;
+    branch *branchs;
+    int topN;
+} network;
+
+typedef struct box {
+    float x, y, w, h;
+} box;
+
+typedef struct detection {
+    box bbox;
+    float *prob;
+    float objectness;
+} detection;
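+
+/* Each "branch" describes one output head of the YOLO Fastest graph: the
+ * feature-map resolution, the number of anchor boxes per cell, the anchor
+ * dimensions and the quantisation parameters of the corresponding output
+ * tensor. A "detection" is one candidate box with its per-class scores. */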
+
+static int sort_class;
+
+static void free_dets(std::forward_list<detection> &dets){
+    std::forward_list<detection>::iterator it;
+    for ( it = dets.begin(); it != dets.end(); ++it ){
+        free(it->prob);
+    }
+}
+
+float sigmoid(float x)
+{
+    return 1.f/(1.f + exp(-x));
+}
+
+static bool det_objectness_comparator(detection &pa, detection &pb)
+{
+    return pa.objectness < pb.objectness;
+}
+
+static void insert_topN_det(std::forward_list<detection> &dets, detection det)
+{
+    std::forward_list<detection>::iterator it;
+    std::forward_list<detection>::iterator last_it;
+    for ( it = dets.begin(); it != dets.end(); ++it ){
+        if(it->objectness > det.objectness)
+            break;
+        last_it = it;
+    }
+    if(it != dets.begin()){
+        dets.emplace_after(last_it, det);
+        free(dets.begin()->prob);
+        dets.pop_front();
+    }
+    else{
+        free(det.prob);
+    }
+}
+
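+/* Decodes the raw int8 output tensors into detection candidates. For each
+ * grid cell (w, h) and anchor, the tensor stores [x, y, w, h, objectness,
+ * class scores...]. Values are dequantised with
+ *     real = scale * (quantised - zero_point)
+ * and the box is recovered following the YOLO formulation:
+ *     bx = (sigmoid(tx) + w) / grid_w      by = (sigmoid(ty) + h) / grid_h
+ *     bw = anchor_w * exp(tw) / input_w    bh = anchor_h * exp(th) / input_h
+ * Only candidates whose objectness exceeds `thresh` are kept; when topN > 0
+ * the list is pruned to the topN highest-objectness candidates. */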
+static std::forward_list<detection> get_network_boxes(network *net, int image_w, int image_h, float thresh, int *num)
+{
+    std::forward_list<detection> dets;
+    int i;
+    int num_classes = net->num_classes;
+    *num = 0;
+
+    for (i = 0; i < net->num_branch; ++i) {
+        int height = net->branchs[i].resolution;
+        int width = net->branchs[i].resolution;
+        int channel = net->branchs[i].num_box*(5+num_classes);
+
+        for (int h = 0; h < net->branchs[i].resolution; h++) {
+            for (int w = 0; w < net->branchs[i].resolution; w++) {
+                for (int anc = 0; anc < net->branchs[i].num_box; anc++) {
+
+                    /* Objectness score. */
+                    int bbox_obj_offset = h * width * channel + w * channel + anc * (num_classes + 5) + 4;
+                    float objectness = sigmoid(((float)net->branchs[i].tf_output[bbox_obj_offset] - net->branchs[i].zero_point) * net->branchs[i].scale);
+
+                    if (objectness > thresh) {
+                        detection det;
+                        det.prob = (float*)calloc(num_classes, sizeof(float));
+                        det.objectness = objectness;
+                        /* Get bbox prediction data for each anchor and each feature point. */
+                        int bbox_x_offset = bbox_obj_offset - 4;
+                        int bbox_y_offset = bbox_x_offset + 1;
+                        int bbox_w_offset = bbox_x_offset + 2;
+                        int bbox_h_offset = bbox_x_offset + 3;
+                        int bbox_scores_offset = bbox_x_offset + 5;
+                        det.bbox.x = ((float)net->branchs[i].tf_output[bbox_x_offset] - net->branchs[i].zero_point) * net->branchs[i].scale;
+                        det.bbox.y = ((float)net->branchs[i].tf_output[bbox_y_offset] - net->branchs[i].zero_point) * net->branchs[i].scale;
+                        det.bbox.w = ((float)net->branchs[i].tf_output[bbox_w_offset] - net->branchs[i].zero_point) * net->branchs[i].scale;
+                        det.bbox.h = ((float)net->branchs[i].tf_output[bbox_h_offset] - net->branchs[i].zero_point) * net->branchs[i].scale;
+
+                        float bbox_x, bbox_y;
+
+                        /* Eliminate the grid sensitivity trick involved in YOLOv4. */
+                        bbox_x = sigmoid(det.bbox.x);
+                        bbox_y = sigmoid(det.bbox.y);
+                        det.bbox.x = (bbox_x + w) / width;
+                        det.bbox.y = (bbox_y + h) / height;
+
+                        det.bbox.w = exp(det.bbox.w) * net->branchs[i].anchor[anc*2] / net->input_w;
+                        det.bbox.h = exp(det.bbox.h) * net->branchs[i].anchor[anc*2+1] / net->input_h;
+
+                        for (int s = 0; s < num_classes; s++) {
+                            det.prob[s] = sigmoid(((float)net->branchs[i].tf_output[bbox_scores_offset + s] - net->branchs[i].zero_point) * net->branchs[i].scale)*objectness;
+                            det.prob[s] = (det.prob[s] > thresh) ? det.prob[s] : 0;
+                        }
+
+                        /* Correct the boxes to the original image dimensions. */
+                        det.bbox.x *= image_w;
+                        det.bbox.w *= image_w;
+                        det.bbox.y *= image_h;
+                        det.bbox.h *= image_h;
+
+                        if (*num < net->topN || net->topN <= 0) {
+                            dets.emplace_front(det);
+                            *num += 1;
+                        } else if (*num == net->topN) {
+                            dets.sort(det_objectness_comparator);
+                            insert_topN_det(dets, det);
+                            *num += 1;
+                        } else {
+                            insert_topN_det(dets, det);
+                        }
+                    }
+                }
+            }
+        }
+    }
+    if (*num > net->topN)
+        *num -= 1;
+    return dets;
+}
+
+/* Initialisation helpers. */
+
+static branch create_branch(int resolution, int num_box, float *anchor, int8_t *tf_output, size_t size, float scale, int zero_point)
+{
+    branch b;
+    b.resolution = resolution;
+    b.num_box = num_box;
+    b.anchor = anchor;
+    b.tf_output = tf_output;
+    b.size = size;
+    b.scale = scale;
+    b.zero_point = zero_point;
+    return b;
+}
+
+static network create_network(int input_w, int input_h, int num_classes, int num_branch, branch* branchs, int topN)
+{
+    network net;
+    net.input_w = input_w;
+    net.input_h = input_h;
+    net.num_classes = num_classes;
+    net.num_branch = num_branch;
+    net.branchs = branchs;
+    net.topN = topN;
+    return net;
+}
+
+/* NMS helpers. */
+
+static float Calc1DOverlap(float x1_center, float width1, float x2_center, float width2)
+{
+    float left_1 = x1_center - width1/2;
+    float left_2 = x2_center - width2/2;
+    float leftest;
+    if (left_1 > left_2) {
+        leftest = left_1;
+    } else {
+        leftest = left_2;
+    }
+
+    float right_1 = x1_center + width1/2;
+    float right_2 = x2_center + width2/2;
+    float rightest;
+    if (right_1 < right_2) {
+        rightest = right_1;
+    } else {
+        rightest = right_2;
+    }
+
+    return rightest - leftest;
+}
+
+static float CalcBoxIntersect(box box1, box box2)
+{
+    float width = Calc1DOverlap(box1.x, box1.w, box2.x, box2.w);
+    if (width < 0) return 0;
+    float height = Calc1DOverlap(box1.y, box1.h, box2.y, box2.h);
+    if (height < 0) return 0;
+
+    float total_area = width*height;
+    return total_area;
+}
+
+static float CalcBoxUnion(box box1, box box2)
+{
+    float boxes_intersection = CalcBoxIntersect(box1, box2);
+    float boxes_union = box1.w*box1.h + box2.w*box2.h - boxes_intersection;
+    return boxes_union;
+}
+
+static float CalcBoxIOU(box box1, box box2)
+{
+    float boxes_intersection = CalcBoxIntersect(box1, box2);
+
+    if (boxes_intersection == 0) return 0;
+
+    float boxes_union = CalcBoxUnion(box1, box2);
+
+    if (boxes_union == 0) return 0;
+
+    return boxes_intersection / boxes_union;
+}
+
+static bool CompareProbs(detection &prob1, detection &prob2)
+{
+    return prob1.prob[sort_class] > prob2.prob[sort_class];
+}
+
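+/* Greedy per-class non-maximum suppression: for every class the candidates
+ * are sorted by class score; any box that overlaps a higher-scoring box with
+ * IoU above `iou_threshold` has its score for that class zeroed out, where
+ *     IoU = intersection_area / (area1 + area2 - intersection_area). */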
+static void CalcNMS(std::forward_list<detection> &detections, int classes, float iou_threshold)
+{
+    int k;
+
+    for (k = 0; k < classes; ++k) {
+        sort_class = k;
+        detections.sort(CompareProbs);
+
+        for (std::forward_list<detection>::iterator it=detections.begin(); it != detections.end(); ++it){
+            if (it->prob[k] == 0) continue;
+            for (std::forward_list<detection>::iterator itc=std::next(it, 1); itc != detections.end(); ++itc){
+                if (itc->prob[k] == 0) continue;
+                if (CalcBoxIOU(it->bbox, itc->bbox) > iou_threshold) {
+                    itc->prob[k] = 0;
+                }
+            }
+        }
+    }
+}
+
+static inline void check_and_fix_offset(int im_w, int im_h, int *offset)
+{
+    if (!offset) return;
+
+    if ((*offset) >= im_w*im_h*FORMAT_MULTIPLY_FACTOR)
+        (*offset) = im_w*im_h*FORMAT_MULTIPLY_FACTOR - 1;
+    else if ((*offset) < 0)
+        *offset = 0;
+}
+
+static void DrawBoxOnImage(uint8_t *img_in, int im_w, int im_h, int bx, int by, int bw, int bh)
+{
+    if (!img_in) {
+        return;
+    }
+
+    int offset = 0;
+    for (int i = 0; i < bw; i++) {
+        /* Draw two horizontal lines. */
+        for (int line = 0; line < 2; line++) {
+            /* Top. */
+            offset = (i + (by + line)*im_w + bx)*FORMAT_MULTIPLY_FACTOR;
+            check_and_fix_offset(im_w, im_h, &offset);
+            img_in[offset] = 0xFF;  /* FORMAT_MULTIPLY_FACTOR for RGB or grayscale. */
+            /* Bottom. */
+            offset = (i + (by + bh - line)*im_w + bx)*FORMAT_MULTIPLY_FACTOR;
+            check_and_fix_offset(im_w, im_h, &offset);
+            img_in[offset] = 0xFF;
+        }
+    }
+
+    for (int i = 0; i < bh; i++) {
+        /* Draw two vertical lines. */
+        for (int line = 0; line < 2; line++) {
+            /* Left. */
+            offset = ((i + by)*im_w + bx + line)*FORMAT_MULTIPLY_FACTOR;
+            check_and_fix_offset(im_w, im_h, &offset);
+            img_in[offset] = 0xFF;
+            /* Right. */
+            offset = ((i + by)*im_w + bx + bw - line)*FORMAT_MULTIPLY_FACTOR;
+            check_and_fix_offset(im_w, im_h, &offset);
+            img_in[offset] = 0xFF;
+        }
+    }
+}
+
+void arm::app::RunPostProcessing(uint8_t *img_in, TfLiteTensor* model_output[2], std::vector<arm::app::DetectionResult>& results_out)
+{
+    TfLiteTensor* output[2] = {nullptr, nullptr};
+    int input_w = INPUT_IMAGE_WIDTH;
+    int input_h = INPUT_IMAGE_HEIGHT;
+
+    for (int anchor = 0; anchor < 2; anchor++) {
+        output[anchor] = model_output[anchor];
+    }
+
+    /* Initialise post-processing. */
+    int num_classes = 1;
+    int num_branch = 2;
+    int topN = 0;
+
+    branch* branchs = (branch*)calloc(num_branch, sizeof(branch));
+
+    /* NOTE: anchors differ for any given input model size; they are estimated during the training phase. */
+    float anchor1[] = {38, 77, 47, 97, 61, 126};
+    float anchor2[] = {14, 26, 19, 37, 28, 55};
+
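+    /* Each branch uses three anchors given as (width, height) pairs in input
+     * pixels: anchor1 serves the coarse 1/32-resolution head and anchor2 the
+     * finer 1/16-resolution head set up below. */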
+    branchs[0] = create_branch(INPUT_IMAGE_WIDTH/32, 3, anchor1, output[0]->data.int8, output[0]->bytes,
+                               ((TfLiteAffineQuantization*)(output[0]->quantization.params))->scale->data[0],
+                               ((TfLiteAffineQuantization*)(output[0]->quantization.params))->zero_point->data[0]);
+
+    branchs[1] = create_branch(INPUT_IMAGE_WIDTH/16, 3, anchor2, output[1]->data.int8, output[1]->bytes,
+                               ((TfLiteAffineQuantization*)(output[1]->quantization.params))->scale->data[0],
+                               ((TfLiteAffineQuantization*)(output[1]->quantization.params))->zero_point->data[0]);
+
+    network net = create_network(input_w, input_h, num_classes, num_branch, branchs, topN);
+    /* End of initialisation. */
+
+    /* Start post-processing. */
+    int nboxes = 0;
+    float thresh = .5;  /* 50% */
+    float nms = .45;
+
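+    /* Candidates below 50% objectness are discarded up front; NMS then
+     * suppresses boxes overlapping a stronger detection with IoU > 0.45. */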
+    int orig_image_width = ORIGINAL_IMAGE_WIDTH;
+    int orig_image_height = ORIGINAL_IMAGE_HEIGHT;
+    std::forward_list<detection> dets = get_network_boxes(&net, orig_image_width, orig_image_height, thresh, &nboxes);
+    /* Do NMS. */
+    CalcNMS(dets, net.num_classes, nms);
+    uint8_t temp_unsuppressed_counter = 0;
+    int j;
+    for (std::forward_list<detection>::iterator it=dets.begin(); it != dets.end(); ++it){
+        float xmin = it->bbox.x - it->bbox.w / 2.0f;
+        float xmax = it->bbox.x + it->bbox.w / 2.0f;
+        float ymin = it->bbox.y - it->bbox.h / 2.0f;
+        float ymax = it->bbox.y + it->bbox.h / 2.0f;
+
+        if (xmin < 0) xmin = 0;
+        if (ymin < 0) ymin = 0;
+        if (xmax > orig_image_width) xmax = orig_image_width;
+        if (ymax > orig_image_height) ymax = orig_image_height;
+
+        float bx = xmin;
+        float by = ymin;
+        float bw = xmax - xmin;
+        float bh = ymax - ymin;
+
+        for (j = 0; j < net.num_classes; ++j) {
+            if (it->prob[j] > 0) {
+
+                arm::app::DetectionResult tmp_result = {};
+
+                tmp_result.m_normalisedVal = it->prob[j];
+                tmp_result.m_x0 = bx;
+                tmp_result.m_y0 = by;
+                tmp_result.m_w = bw;
+                tmp_result.m_h = bh;
+
+                results_out.push_back(tmp_result);
+
+                DrawBoxOnImage(img_in, orig_image_width, orig_image_height, bx, by, bw, bh);
+
+                temp_unsuppressed_counter++;
+            }
+        }
+    }
+
+    free_dets(dets);
+    free(branchs);
+}
+
+
+void arm::app::RgbToGrayscale(const uint8_t *rgb, uint8_t *gray, int im_w, int im_h)
+{
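+    /* The weighted sum uses the ITU-R BT.601 luma coefficients. */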
+    float R = 0.299;
+    float G = 0.587;
+    float B = 0.114;
+    for (int i = 0; i < im_w*im_h; i++) {
+
+        uint32_t int_gray = rgb[i*3 + 0]*R + rgb[i*3 + 1]*G + rgb[i*3 + 2]*B;
+        /* Clip if needed. */
+        if (int_gray <= UINT8_MAX) {
+            gray[i] = int_gray;
+        } else {
+            gray[i] = UINT8_MAX;
+        }
+    }
+}
+
diff --git a/source/use_case/object_detection/src/MainLoop.cc b/source/use_case/object_detection/src/MainLoop.cc
new file mode 100644
index 0000000..b0fbf96
--- /dev/null
+++ b/source/use_case/object_detection/src/MainLoop.cc
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"                    /* Brings in platform definitions. */
+#include "InputFiles.hpp"           /* For input images. */
+#include "YoloFastestModel.hpp"     /* Model class for running inference. */
+#include "UseCaseHandler.hpp"       /* Handlers for different user options. */
+#include "UseCaseCommonUtils.hpp"   /* Utils functions. */
+#include "DetectionUseCaseUtils.hpp"   /* Utils functions specific to object detection. */
+
+
+void main_loop(hal_platform& platform)
+{
+    arm::app::YoloFastestModel model;  /* Model wrapper object. */
+
+    /* Load the model. */
+    if (!model.Init()) {
+        printf_err("Failed to initialise model\n");
+        return;
+    }
+
+    /* Instantiate application context. */
+    arm::app::ApplicationContext caseContext;
+
+    arm::app::Profiler profiler{&platform, "object_detection"};
+    caseContext.Set<arm::app::Profiler&>("profiler", profiler);
+    caseContext.Set<hal_platform&>("platform", platform);
+    caseContext.Set<arm::app::Model&>("model", model);
+    caseContext.Set<uint32_t>("imgIndex", 0);
+
+
+    bool executionSuccessful = true;
+    constexpr bool bUseMenu = NUMBER_OF_FILES > 1 ? true : false;
+
+    /* Loop. */
+    do {
+        int menuOption = common::MENU_OPT_RUN_INF_NEXT;
+        if (bUseMenu) {
+            DisplayDetectionMenu();
+            menuOption = arm::app::ReadUserInputAsInt(platform);
+            printf("\n");
+        }
+        switch (menuOption) {
+            case common::MENU_OPT_RUN_INF_NEXT:
+                executionSuccessful = ObjectDetectionHandler(caseContext, caseContext.Get<uint32_t>("imgIndex"), false);
+                break;
+            case common::MENU_OPT_RUN_INF_CHOSEN: {
+                printf("    Enter the image index [0, %d]: ", NUMBER_OF_FILES-1);
+                fflush(stdout);
+                auto imgIndex = static_cast<uint32_t>(arm::app::ReadUserInputAsInt(platform));
+                executionSuccessful = ObjectDetectionHandler(caseContext, imgIndex, false);
+                break;
+            }
+            case common::MENU_OPT_RUN_INF_ALL:
+                executionSuccessful = ObjectDetectionHandler(caseContext, caseContext.Get<uint32_t>("imgIndex"), true);
+                break;
+            case common::MENU_OPT_SHOW_MODEL_INFO:
+                executionSuccessful = model.ShowModelInfoHandler();
+                break;
+            case common::MENU_OPT_LIST_IFM:
+                executionSuccessful = ListFilesHandler(caseContext);
+                break;
+            default:
+                printf("Incorrect choice, try again.");
+                break;
+        }
+    } while (executionSuccessful && bUseMenu);
+    info("Main loop terminated.\n");
+}
diff --git a/source/use_case/object_detection/src/UseCaseHandler.cc b/source/use_case/object_detection/src/UseCaseHandler.cc
new file mode 100644
index 0000000..45df4f8
--- /dev/null
+++ b/source/use_case/object_detection/src/UseCaseHandler.cc
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "UseCaseHandler.hpp"
+#include "InputFiles.hpp"
+#include "YoloFastestModel.hpp"
+#include "UseCaseCommonUtils.hpp"
+#include "DetectionUseCaseUtils.hpp"
+#include "DetectorPostProcessing.hpp"
+#include "hal.h"
+
+#include <inttypes.h>
+
+
+/* Used for presentation; the original images are read-only. */
+static uint8_t g_image_buffer[INPUT_IMAGE_WIDTH*INPUT_IMAGE_HEIGHT*FORMAT_MULTIPLY_FACTOR] IFM_BUF_ATTRIBUTE = {}; 
+
+namespace arm {
+namespace app {
+
+
+    /* Object detection inference handler. */
+    bool ObjectDetectionHandler(ApplicationContext& ctx, uint32_t imgIndex, bool runAll)
+    {
+        auto& platform = ctx.Get<hal_platform&>("platform");
+        auto& profiler = ctx.Get<Profiler&>("profiler");
+
+        constexpr uint32_t dataPsnImgDownscaleFactor = 1;
+        constexpr uint32_t dataPsnImgStartX = 10;
+        constexpr uint32_t dataPsnImgStartY = 35;
+
+        constexpr uint32_t dataPsnTxtInfStartX = 150;
+        constexpr uint32_t dataPsnTxtInfStartY = 40;
+
+        platform.data_psn->clear(COLOR_BLACK);
+
+        auto& model = ctx.Get<Model&>("model");
+        
+        /* If the request has a valid index, set the image index. */
+        if (imgIndex < NUMBER_OF_FILES) {
+            if (!SetAppCtxIfmIdx(ctx, imgIndex, "imgIndex")) {
+                return false;
+            }
+        }
+        if (!model.IsInited()) {
+            printf_err("Model is not initialised! Terminating processing.\n");
+            return false;
+        }
+
+        auto curImIdx = ctx.Get<uint32_t>("imgIndex");
+
+        TfLiteTensor* inputTensor = model.GetInputTensor(0);
+
+        if (!inputTensor->dims) {
+            printf_err("Invalid input tensor dims\n");
+            return false;
+        } else if (inputTensor->dims->size < 3) {
+            printf_err("Input tensor dimension should be >= 3\n");
+            return false;
+        }
+
+        TfLiteIntArray* inputShape = model.GetInputShape(0);
+
+        const uint32_t nCols = inputShape->data[arm::app::YoloFastestModel::ms_inputColsIdx];
+        const uint32_t nRows = inputShape->data[arm::app::YoloFastestModel::ms_inputRowsIdx];
+        const uint32_t nPresentationChannels = FORMAT_MULTIPLY_FACTOR;
+
+        std::vector<DetectionResult> results;
+
+        do {
+            /* Strings for presentation/logging. */
+            std::string str_inf{"Running inference... "};
+
+            const uint8_t* curr_image = get_img_array(ctx.Get<uint32_t>("imgIndex"));
+
+            /* Copy over the data and convert to grayscale. */
+#if DISPLAY_RGB_IMAGE
+            memcpy(g_image_buffer,curr_image, INPUT_IMAGE_WIDTH*INPUT_IMAGE_HEIGHT*FORMAT_MULTIPLY_FACTOR);
+#else 
+            RgbToGrayscale(curr_image,g_image_buffer,INPUT_IMAGE_WIDTH,INPUT_IMAGE_HEIGHT);
+#endif /*DISPLAY_RGB_IMAGE*/
+            
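+            /* The network expects a single-channel input, so the RGB source is
+             * converted to grayscale straight into the input tensor, while
+             * g_image_buffer keeps the copy that is shown on the LCD. */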
+            RgbToGrayscale(curr_image,inputTensor->data.uint8,INPUT_IMAGE_WIDTH,INPUT_IMAGE_HEIGHT);
+
+
+            /* Display this image on the LCD. */
+            platform.data_psn->present_data_image(
+                g_image_buffer,
+                nCols, nRows, nPresentationChannels,
+                dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);
+
+            /* If the data is signed. */
+            if (model.IsDataSigned()) {
+                image::ConvertImgToInt8(inputTensor->data.data, inputTensor->bytes);
+            }
+
+            /* Display message on the LCD - inference running. */
+            platform.data_psn->present_data_text(str_inf.c_str(), str_inf.size(),
+                                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+            /* Run inference over this image. */
+            info("Running inference on image %" PRIu32 " => %s\n", ctx.Get<uint32_t>("imgIndex"),
+                get_filename(ctx.Get<uint32_t>("imgIndex")));
+
+            if (!RunInference(model, profiler)) {
+                return false;
+            }
+
+            /* Erase. */
+            str_inf = std::string(str_inf.size(), ' ');
+            platform.data_psn->present_data_text(str_inf.c_str(), str_inf.size(),
+                                    dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+            /* Detector post-processing*/
+            TfLiteTensor* output_arr[2] = {nullptr,nullptr};
+            output_arr[0] = model.GetOutputTensor(0);
+            output_arr[1] = model.GetOutputTensor(1);
+            RunPostProcessing(g_image_buffer,output_arr,results);
+
+            platform.data_psn->present_data_image(
+                g_image_buffer,
+                nCols, nRows, nPresentationChannels,
+                dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);
+
+            /* End of detector post-processing. */
+
+
+            /* Add results to context for access outside handler. */
+            ctx.Set<std::vector<DetectionResult>>("results", results);
+
+#if VERIFY_TEST_OUTPUT
+            arm::app::DumpTensor(output_arr[0]);
+            arm::app::DumpTensor(output_arr[1]);
+#endif /* VERIFY_TEST_OUTPUT */
+
+            if (!image::PresentInferenceResult(platform, results)) {
+                return false;
+            }
+
+            profiler.PrintProfilingResult();
+
+            IncrementAppCtxIfmIdx(ctx,"imgIndex");
+
+        } while (runAll && ctx.Get<uint32_t>("imgIndex") != curImIdx);
+
+        return true;
+    }
+
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/object_detection/src/YoloFastestModel.cc b/source/use_case/object_detection/src/YoloFastestModel.cc
new file mode 100644
index 0000000..a8afd59
--- /dev/null
+++ b/source/use_case/object_detection/src/YoloFastestModel.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "YoloFastestModel.hpp"
+
+#include "hal.h"
+
+const tflite::MicroOpResolver& arm::app::YoloFastestModel::GetOpResolver()
+{
+    return this->m_opResolver;
+}
+
+bool arm::app::YoloFastestModel::EnlistOperations()
+{
+    this->m_opResolver.AddDepthwiseConv2D();
+    this->m_opResolver.AddConv2D();
+    this->m_opResolver.AddAdd();
+    this->m_opResolver.AddResizeNearestNeighbor();
+    /* These operators are needed for the unit tests to pass; they are not required on the FVP. */
+    this->m_opResolver.AddPad();
+    this->m_opResolver.AddMaxPool2D();
+    this->m_opResolver.AddConcatenation();
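+
+    /* Seven operators are registered above; ms_maxOpCnt (8) leaves one slot
+     * for the optional Ethos-U custom operator registered below. */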
+
+#if defined(ARM_NPU)
+    if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
+        info("Added %s support to op resolver\n",
+            tflite::GetString_ETHOSU());
+    } else {
+        printf_err("Failed to add Arm NPU support to op resolver.");
+        return false;
+    }
+#endif /* ARM_NPU */
+    return true;
+}
+
+extern uint8_t* GetModelPointer();
+const uint8_t* arm::app::YoloFastestModel::ModelPointer()
+{
+    return GetModelPointer();
+}
+
+extern size_t GetModelLen();
+size_t arm::app::YoloFastestModel::ModelSize()
+{
+    return GetModelLen();
+}
diff --git a/source/use_case/object_detection/usecase.cmake b/source/use_case/object_detection/usecase.cmake
new file mode 100644
index 0000000..15bf534
--- /dev/null
+++ b/source/use_case/object_detection/usecase.cmake
@@ -0,0 +1,59 @@
+#----------------------------------------------------------------------------
+#  Copyright (c) 2022 Arm Limited. All rights reserved.
+#  SPDX-License-Identifier: Apache-2.0
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#----------------------------------------------------------------------------
+
+USER_OPTION(${use_case}_FILE_PATH "Directory with custom image files to use, or path to a single image, in the evaluation application"
+    ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
+    PATH_OR_FILE)
+
+USER_OPTION(${use_case}_IMAGE_SIZE "Square image size in pixels. Images will be resized to this size."
+    192
+    STRING)
+    
+add_compile_definitions(DISPLAY_RGB_IMAGE=1)
+add_compile_definitions(INPUT_IMAGE_WIDTH=${${use_case}_IMAGE_SIZE})
+add_compile_definitions(INPUT_IMAGE_HEIGHT=${${use_case}_IMAGE_SIZE})
+add_compile_definitions(ORIGINAL_IMAGE_WIDTH=${${use_case}_IMAGE_SIZE})
+add_compile_definitions(ORIGINAL_IMAGE_HEIGHT=${${use_case}_IMAGE_SIZE})
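+# The generated sample images are resized to the square size above, so the
+# "original" image dimensions seen by the post-processing stage match the
+# model input dimensions.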
+
+
+# Generate input files
+generate_images_code("${${use_case}_FILE_PATH}"
+                     ${SRC_GEN_DIR}
+                     ${INC_GEN_DIR}
+                     "${${use_case}_IMAGE_SIZE}")
+
+
+USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen model"
+    0x00082000
+    STRING)
+
+if (ETHOS_U_NPU_ENABLED)
+    set(DEFAULT_MODEL_PATH      ${DEFAULT_MODEL_DIR}/yolo-fastest_192_face_v4_vela_${ETHOS_U_NPU_CONFIG_ID}.tflite)
+else()
+    set(DEFAULT_MODEL_PATH      ${DEFAULT_MODEL_DIR}/yolo-fastest_192_face_v4.tflite)
+endif()
+
+USER_OPTION(${use_case}_MODEL_TFLITE_PATH "NN model file to be used in the evaluation application. The model file must be in tflite format."
+    ${DEFAULT_MODEL_PATH}
+    FILEPATH
+    )
+
+# Generate model file
+generate_tflite_code(
+    MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+    DESTINATION ${SRC_GEN_DIR}
+    )