MLECO-1868: Fix static code analyzer warnings

Signed-off-by: alexander <alexander.efremov@arm.com>
Change-Id: Ie423e9cad3fabec6ab077ded7236813fe4933dea
diff --git a/source/use_case/img_class/src/UseCaseHandler.cc b/source/use_case/img_class/src/UseCaseHandler.cc
index f7e83f5..ffeb860 100644
--- a/source/use_case/img_class/src/UseCaseHandler.cc
+++ b/source/use_case/img_class/src/UseCaseHandler.cc
@@ -35,13 +35,13 @@
     * @param[out]      inputTensor   Pointer to the input tensor to be populated.
     * @return          true if tensor is loaded, false otherwise.
     **/
-    static bool _LoadImageIntoTensor(uint32_t imIdx, TfLiteTensor* inputTensor);
+    static bool LoadImageIntoTensor(uint32_t imIdx, TfLiteTensor* inputTensor);
 
     /**
      * @brief           Helper function to increment current image index.
      * @param[in,out]   ctx   Pointer to the application context object.
      **/
-    static void _IncrementAppCtxImageIdx(ApplicationContext& ctx);
+    static void IncrementAppCtxImageIdx(ApplicationContext& ctx);
 
     /**
      * @brief           Helper function to set the image index.
@@ -49,7 +49,7 @@
      * @param[in]       idx   Value to be set.
      * @return          true if index is set, false otherwise.
      **/
-    static bool _SetAppCtxImageIdx(ApplicationContext& ctx, uint32_t idx);
+    static bool SetAppCtxImageIdx(ApplicationContext& ctx, uint32_t idx);
 
     /**
      * @brief           Presents inference results using the data presentation
@@ -60,8 +60,8 @@
      *                              otherwise, this can be passed in as 0.
      * @return          true if successful, false otherwise.
      **/
-    static bool _PresentInferenceResult(hal_platform& platform,
-                                        const std::vector<ClassificationResult>& results);
+    static bool PresentInferenceResult(hal_platform& platform,
+                                       const std::vector<ClassificationResult>& results);
 
     /**
      * @brief           Helper function to convert a UINT8 image to INT8 format.
@@ -89,7 +89,7 @@
 
         /* If the request has a valid size, set the image index. */
         if (imgIndex < NUMBER_OF_FILES) {
-            if (!_SetAppCtxImageIdx(ctx, imgIndex)) {
+            if (!SetAppCtxImageIdx(ctx, imgIndex)) {
                 return false;
             }
         }
@@ -124,7 +124,7 @@
             std::string str_inf{"Running inference... "};
 
             /* Copy over the data. */
-            _LoadImageIntoTensor(ctx.Get<uint32_t>("imgIndex"), inputTensor);
+            LoadImageIntoTensor(ctx.Get<uint32_t>("imgIndex"), inputTensor);
 
             /* Display this image on the LCD. */
             platform.data_psn->present_data_image(
@@ -164,20 +164,20 @@
             arm::app::DumpTensor(outputTensor);
 #endif /* VERIFY_TEST_OUTPUT */
 
-            if (!_PresentInferenceResult(platform, results)) {
+            if (!PresentInferenceResult(platform, results)) {
                 return false;
             }
 
             profiler.PrintProfilingResult();
 
-            _IncrementAppCtxImageIdx(ctx);
+            IncrementAppCtxImageIdx(ctx);
 
         } while (runAll && ctx.Get<uint32_t>("imgIndex") != curImIdx);
 
         return true;
     }
 
-    static bool _LoadImageIntoTensor(const uint32_t imIdx, TfLiteTensor* inputTensor)
+    static bool LoadImageIntoTensor(uint32_t imIdx, TfLiteTensor* inputTensor)
     {
         const size_t copySz = inputTensor->bytes < IMAGE_DATA_SIZE ?
                               inputTensor->bytes : IMAGE_DATA_SIZE;
@@ -193,7 +193,7 @@
         return true;
     }
 
-    static void _IncrementAppCtxImageIdx(ApplicationContext& ctx)
+    static void IncrementAppCtxImageIdx(ApplicationContext& ctx)
     {
         auto curImIdx = ctx.Get<uint32_t>("imgIndex");
 
@@ -205,7 +205,7 @@
         ctx.Set<uint32_t>("imgIndex", curImIdx);
     }
 
-    static bool _SetAppCtxImageIdx(ApplicationContext& ctx, const uint32_t idx)
+    static bool SetAppCtxImageIdx(ApplicationContext& ctx, uint32_t idx)
     {
         if (idx >= NUMBER_OF_FILES) {
             printf_err("Invalid idx %u (expected less than %u)\n",
@@ -216,8 +216,8 @@
         return true;
     }
 
-    static bool _PresentInferenceResult(hal_platform& platform,
-                                        const std::vector<ClassificationResult>& results)
+    static bool PresentInferenceResult(hal_platform& platform,
+                                       const std::vector<ClassificationResult>& results)
     {
         constexpr uint32_t dataPsnTxtStartX1 = 150;
         constexpr uint32_t dataPsnTxtStartY1 = 30;