MLECO-1858: Documentation update

* Removing the `_` prefix from private functions and member variables, for example:
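
  A typical rename applied by this patch (an illustrative sketch only; the
  diffs below show the actual changes):

      // before: private member prefixed with an underscore
      this->_m_opResolver.AddConv2D();

      // after: underscore prefix dropped
      this->m_opResolver.AddConv2D();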

Signed-off-by: Isabella Gottardi <isabella.gottardi@arm.com>
Change-Id: I5a5d652f9647ebb16d2d2bd16ab980e73f7be3cf
diff --git a/source/use_case/kws/src/DsCnnModel.cc b/source/use_case/kws/src/DsCnnModel.cc
index a093eb4..4edfc04 100644
--- a/source/use_case/kws/src/DsCnnModel.cc
+++ b/source/use_case/kws/src/DsCnnModel.cc
@@ -20,21 +20,21 @@
 
 const tflite::MicroOpResolver& arm::app::DsCnnModel::GetOpResolver()
 {
-    return this->_m_opResolver;
+    return this->m_opResolver;
 }
 
 bool arm::app::DsCnnModel::EnlistOperations()
 {
-    this->_m_opResolver.AddReshape();
-    this->_m_opResolver.AddAveragePool2D();
-    this->_m_opResolver.AddConv2D();
-    this->_m_opResolver.AddDepthwiseConv2D();
-    this->_m_opResolver.AddFullyConnected();
-    this->_m_opResolver.AddRelu();
-    this->_m_opResolver.AddSoftmax();
+    this->m_opResolver.AddReshape();
+    this->m_opResolver.AddAveragePool2D();
+    this->m_opResolver.AddConv2D();
+    this->m_opResolver.AddDepthwiseConv2D();
+    this->m_opResolver.AddFullyConnected();
+    this->m_opResolver.AddRelu();
+    this->m_opResolver.AddSoftmax();
 
 #if defined(ARM_NPU)
-    if (kTfLiteOk == this->_m_opResolver.AddEthosU()) {
+    if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
         info("Added %s support to op resolver\n",
             tflite::GetString_ETHOSU());
     } else {
diff --git a/source/use_case/kws/src/UseCaseHandler.cc b/source/use_case/kws/src/UseCaseHandler.cc
index eaf53c1..2144c03 100644
--- a/source/use_case/kws/src/UseCaseHandler.cc
+++ b/source/use_case/kws/src/UseCaseHandler.cc
@@ -52,8 +52,6 @@
      *                  object.
      * @param[in]       platform    Reference to the hal platform object.
      * @param[in]       results     Vector of classification results to be displayed.
-     * @param[in]       infTimeMs   Inference time in milliseconds, if available,
-     *                              otherwise, this can be passed in as 0.
      * @return          true if successful, false otherwise.
      **/
     static bool PresentInferenceResult(hal_platform& platform,
@@ -341,11 +339,11 @@
      * Real features math is done by a lambda function provided as a parameter.
      * Features are written to input tensor memory.
      *
-     * @tparam T            Feature vector type.
-     * @param inputTensor   Model input tensor pointer.
-     * @param cacheSize     Number of feature vectors to cache. Defined by the sliding window overlap.
-     * @param compute       Features calculator function.
-     * @return              Lambda function to compute features.
+     * @tparam T                Feature vector type.
+     * @param[in] inputTensor   Model input tensor pointer.
+     * @param[in] cacheSize     Number of feature vectors to cache. Defined by the sliding window overlap.
+     * @param[in] compute       Features calculator function.
+     * @return                  Lambda function to compute features.
      */
     template<class T>
     std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>