COMPMID-556 Improved indentation and error handling in format_doxygen.py

Change-Id: I6f51ffe6c324d9da500716b52c97c344f2a2a164
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110486
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
index a3fd3fe..ef51cbe 100644
--- a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
@@ -72,27 +72,27 @@
     using ActivationFunctionExecutorPtr = void (NEActivationLayerKernel::*)(const Window &window);
     /** Function to apply an activation function on a tensor.
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     template <ActivationLayerInfo::ActivationFunction F, typename T>
     typename std::enable_if<std::is_same<T, float>::value, void>::type activation(const Window &window);
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
     /** Function to apply an activation function on a tensor.
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     template <ActivationLayerInfo::ActivationFunction F, typename T>
     typename std::enable_if<std::is_same<T, float16_t>::value, void>::type activation(const Window &window);
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
     /** Function to apply an activation function on a tensor.
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     template <ActivationLayerInfo::ActivationFunction F, typename T>
     typename std::enable_if<std::is_same<T, qint8_t>::value, void>::type activation(const Window &window);
     /** Function to apply an activation function on a tensor.
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     template <ActivationLayerInfo::ActivationFunction F, typename T>
     typename std::enable_if<std::is_same<T, qint16_t>::value, void>::type activation(const Window &window);
diff --git a/arm_compute/core/NEON/kernels/NEDerivativeKernel.h b/arm_compute/core/NEON/kernels/NEDerivativeKernel.h
index 7613b58..5d46516 100644
--- a/arm_compute/core/NEON/kernels/NEDerivativeKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDerivativeKernel.h
@@ -64,17 +64,17 @@
 private:
     /** Function to perform derivative along the X direction on the given window
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     void derivative_x(const Window &window);
     /** Function to perform derivative along the Y direction on the given window
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     void derivative_y(const Window &window);
     /** Function to perform derivative along the X and Y direction on the given window
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     void derivative_xy(const Window &window);
     /** Common signature for all the specialised derivative functions
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
index 654dee2..7684350 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
@@ -58,27 +58,27 @@
     NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &operator=(NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &&) = default;
     /** Initialise the kernel's input and output.
      *
-    * @param[in]  input           Input tensor. Data type supported: S32
-    * @param[in]  bias            Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
-    *                             Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-    * @param[out] output          Output tensor. Data type supported: Data type supported: QASYMM8
-    * @param[in]  result_offset   Offset to be added to each element of the input matrix
-    * @param[in]  result_mult_int Value to be multiplied to each element of the input matrix when once the result_offset has been add
-    * @param[in]  result_shift    Number of bits to shift right the result before converting back to QASYMM8
-    * @param[in]  min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
-    * @param[in]  max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
-    *                             Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+     * @param[in]  input           Input tensor. Data type supported: S32
+     * @param[in]  bias            Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
+     *                             Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[out] output          Output tensor. Data type supported: QASYMM8
+     * @param[in]  result_offset   Offset to be added to each element of the input matrix
+     * @param[in]  result_mult_int Value to be multiplied to each element of the input matrix once the result_offset has been added
+     * @param[in]  result_shift    Number of bits to shift right the result before converting back to QASYMM8
+     * @param[in]  min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+     * @param[in]  max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+     *                             Along with @p min, this value can be used to implement "rectified linear unit" activation functions
      */
     void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min = 0, int max = 0);
     /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel
      *
-    * @param[in] input  Input tensor. Data type supported: S32
-    * @param[in] bias   Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
-    *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
-    * @param[in] output Output tensor. Data type supported: Data type supported: QASYMM8
-    * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QASYMM8
-    * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
-    *                   Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+     * @param[in] input  Input tensor. Data type supported: S32
+     * @param[in] bias   Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
+     *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+     * @param[in] output Output tensor. Data type supported: QASYMM8
+     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+     *                   Along with @p min, this value can be used to implement "rectified linear unit" activation functions
      */
     static Error validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
 
diff --git a/arm_compute/core/NEON/kernels/NEHistogramKernel.h b/arm_compute/core/NEON/kernels/NEHistogramKernel.h
index 0fa911d..672472e 100644
--- a/arm_compute/core/NEON/kernels/NEHistogramKernel.h
+++ b/arm_compute/core/NEON/kernels/NEHistogramKernel.h
@@ -82,28 +82,28 @@
 private:
     /** Function to merge multiple partial histograms.
      *
-     *  @param[out] global_hist Pointer to the final histogram.
-     *  @param[in]  local_hist  Pointer to the partial histograms.
-     *  @param[in]  bins        Number of bins.
+     * @param[out] global_hist Pointer to the final histogram.
+     * @param[in]  local_hist  Pointer to the partial histograms.
+     * @param[in]  bins        Number of bins.
      */
     void merge_histogram(uint32_t *global_hist, const uint32_t *local_hist, size_t bins);
     /** Function to merge multiple minimum values of partial histograms.
      *
-     *  @param[out] global_min Pointer to the global min value.
-     *  @param[in]  local_min  Local min value.
+     * @param[out] global_min Pointer to the global min value.
+     * @param[in]  local_min  Local min value.
      */
     void merge_min(uint8_t *global_min, const uint8_t &local_min);
     /** Function to perform histogram on the given window
-      *
-     *  @param[in] win  Region on which to execute the kernel
-     *  @param[in] info Info about the executing thread
+     *
+     * @param[in] win  Region on which to execute the kernel
+     * @param[in] info Info about the executing thread
      */
     void histogram_U8(Window win, const ThreadInfo &info);
     /** Function to perform histogram on the given window where histogram is
      *         of fixed size 256 without ranges and offsets.
      *
-     *  @param[in] win  Region on which to execute the kernel
-     *  @param[in] info Info about the executing thread
+     * @param[in] win  Region on which to execute the kernel
+     * @param[in] info Info about the executing thread
      */
     void histogram_fixed_U8(Window win, const ThreadInfo &info);
     /** Pre-calculate the pixel windowing for every possible pixel
diff --git a/arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h b/arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h
index 46b2a8d..76c6163 100644
--- a/arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h
+++ b/arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h
@@ -66,17 +66,17 @@
 private:
     /** Function to perform magnitude on the given window
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     void magnitude(const Window &window);
     /** Function to perform phase on the given window
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     void phase(const Window &window);
     /** Function to perform magnitude and phase on the given window
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     void magnitude_phase(const Window &window);
 
@@ -130,17 +130,17 @@
 private:
     /** Function to perform magnitude on the given window
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     void magnitude(const Window &window);
     /** Function to perform phase on the given window
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     void phase(const Window &window);
     /** Function to perform magnitude and phase on the given window
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     void magnitude_phase(const Window &window);
 
diff --git a/arm_compute/core/NEON/kernels/NEWarpKernel.h b/arm_compute/core/NEON/kernels/NEWarpKernel.h
index 3a1cab1..d7cb82f 100644
--- a/arm_compute/core/NEON/kernels/NEWarpKernel.h
+++ b/arm_compute/core/NEON/kernels/NEWarpKernel.h
@@ -66,17 +66,17 @@
 protected:
     /** function to perform warp affine or warp perspective on the given window when border mode == UNDEFINED
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     virtual void warp_undefined(const Window &window) = 0;
     /** function to perform warp affine or warp perspective on the given window when border mode == CONSTANT
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     virtual void warp_constant(const Window &window) = 0;
     /** function to perform warp affine or warp perspective on the given window when border mode == REPLICATE
      *
-     *  @param[in] window Region on which to execute the kernel
+     * @param[in] window Region on which to execute the kernel
      */
     virtual void warp_replicate(const Window &window) = 0;
     /** Common signature for all the specialised warp functions