MLECO-3186: Each use case should use the same namespace convention as KWS and ASR

Certain use cases (UCs) required additional work because use-case
context variables also became part of a namespace in the generated
files. The solution was to declare these extra variables as part of
the UC namespace in the respective model.hpp files, as sketched
below. Additional changes to standardise the use of namespaces may
be required; a new task is proposed for this.
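
For illustration only, a minimal sketch of the intended pattern; the
declarations and types below are assumptions and not taken verbatim
from this patch. Constants referenced in the diff, such as anchor1,
anchor2 and originalImageSize, sit inside the UC namespace in the
model header so call sites can qualify them as
arm::app::object_detection::<name>:

    /* Hypothetical model.hpp excerpt for the object_detection UC. */
    namespace arm {
    namespace app {
    namespace object_detection {

        /* Extra UC context variables declared in the UC namespace;
         * names from the diff, types assumed for illustration. */
        extern const int   originalImageSize;
        extern const float anchor1[];
        extern const float anchor2[];

    } /* namespace object_detection */
    } /* namespace app */
    } /* namespace arm */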

Minor typo fixes and rewording of the relevant sections of customizing.md are also included.

Signed-off-by: Liam Barry <liam.barry@arm.com>
Change-Id: Ie78f82a30be252cb841136ea5115f21fc8d762cb
diff --git a/source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc b/source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc
index fb1606a..7610c4f 100644
--- a/source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc
+++ b/source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc
@@ -43,45 +43,42 @@
             m_topN(topN)
 {
     /* Init PostProcessing */
-    this->m_net =
-    object_detection::Network {
-        .inputWidth = inputImgCols,
+    this->m_net = object_detection::Network{
+        .inputWidth  = inputImgCols,
         .inputHeight = inputImgRows,
-        .numClasses = numClasses,
-        .branches = {
-            object_detection::Branch {
-                        .resolution = inputImgCols/32,
-                        .numBox = 3,
-                        .anchor = anchor1,
-                        .modelOutput = this->m_outputTensor0->data.int8,
-                        .scale = (static_cast<TfLiteAffineQuantization*>(
-                                this->m_outputTensor0->quantization.params))->scale->data[0],
-                        .zeroPoint = (static_cast<TfLiteAffineQuantization*>(
-                                this->m_outputTensor0->quantization.params))->zero_point->data[0],
-                        .size = this->m_outputTensor0->bytes
-            },
-            object_detection::Branch {
-                    .resolution = inputImgCols/16,
-                    .numBox = 3,
-                    .anchor = anchor2,
-                    .modelOutput = this->m_outputTensor1->data.int8,
-                    .scale = (static_cast<TfLiteAffineQuantization*>(
-                            this->m_outputTensor1->quantization.params))->scale->data[0],
-                    .zeroPoint = (static_cast<TfLiteAffineQuantization*>(
-                            this->m_outputTensor1->quantization.params))->zero_point->data[0],
-                    .size = this->m_outputTensor1->bytes
-            }
-        },
-        .topN = m_topN
-    };
+        .numClasses  = numClasses,
+        .branches =
+            {object_detection::Branch{.resolution  = inputImgCols / 32,
+                                      .numBox      = 3,
+                                      .anchor      = arm::app::object_detection::anchor1,
+                                      .modelOutput = this->m_outputTensor0->data.int8,
+                                      .scale       = (static_cast<TfLiteAffineQuantization*>(
+                                                    this->m_outputTensor0->quantization.params))
+                                                   ->scale->data[0],
+                                      .zeroPoint = (static_cast<TfLiteAffineQuantization*>(
+                                                        this->m_outputTensor0->quantization.params))
+                                                       ->zero_point->data[0],
+                                      .size = this->m_outputTensor0->bytes},
+             object_detection::Branch{.resolution  = inputImgCols / 16,
+                                      .numBox      = 3,
+                                      .anchor      = arm::app::object_detection::anchor2,
+                                      .modelOutput = this->m_outputTensor1->data.int8,
+                                      .scale       = (static_cast<TfLiteAffineQuantization*>(
+                                                    this->m_outputTensor1->quantization.params))
+                                                   ->scale->data[0],
+                                      .zeroPoint = (static_cast<TfLiteAffineQuantization*>(
+                                                        this->m_outputTensor1->quantization.params))
+                                                       ->zero_point->data[0],
+                                      .size = this->m_outputTensor1->bytes}},
+        .topN = m_topN};
     /* End init */
 }
 
 bool DetectorPostProcess::DoPostProcess()
 {
     /* Start postprocessing */
-    int originalImageWidth = originalImageSize;
-    int originalImageHeight = originalImageSize;
+    int originalImageWidth  = arm::app::object_detection::originalImageSize;
+    int originalImageHeight = arm::app::object_detection::originalImageSize;
 
     std::forward_list<image::Detection> detections;
     GetNetworkBoxes(this->m_net, originalImageWidth, originalImageHeight, m_threshold, detections);