Apply clang-format on repository

Code is formatted as per a revised clang-format configuration
file (not part of this delivery). clang-format version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow up for formatting of .cl files and the
files under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
diff --git a/arm_compute/runtime/CPP/CPPScheduler.h b/arm_compute/runtime/CPP/CPPScheduler.h
index a5932d6..7f70b5f 100644
--- a/arm_compute/runtime/CPP/CPPScheduler.h
+++ b/arm_compute/runtime/CPP/CPPScheduler.h
@@ -55,10 +55,10 @@
     static CPPScheduler &get();
 
     // Inherited functions overridden
-    void set_num_threads(unsigned int num_threads) override;
-    void set_num_threads_with_affinity(unsigned int num_threads, BindFunc func) override;
+    void         set_num_threads(unsigned int num_threads) override;
+    void         set_num_threads_with_affinity(unsigned int num_threads, BindFunc func) override;
     unsigned int num_threads() const override;
-    void schedule(ICPPKernel *kernel, const Hints &hints) override;
+    void         schedule(ICPPKernel *kernel, const Hints &hints) override;
     void schedule_op(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors) override;
 
 protected:
diff --git a/arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h b/arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h
index 58b4bf2..9af4ed6 100644
--- a/arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h
+++ b/arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h
@@ -61,8 +61,16 @@
      * @param[in]  keeps_size       (Optional) Number of filtered indices per class tensor of size [num_classes]. Data types supported: U32.
      * @param[in]  info             (Optional) BoxNMSLimitInfo information.
      */
-    void configure(const ITensor *scores_in, const ITensor *boxes_in, const ITensor *batch_splits_in, ITensor *scores_out, ITensor *boxes_out, ITensor *classes,
-                   ITensor *batch_splits_out = nullptr, ITensor *keeps = nullptr, ITensor *keeps_size = nullptr, const BoxNMSLimitInfo info = BoxNMSLimitInfo());
+    void configure(const ITensor        *scores_in,
+                   const ITensor        *boxes_in,
+                   const ITensor        *batch_splits_in,
+                   ITensor              *scores_out,
+                   ITensor              *boxes_out,
+                   ITensor              *classes,
+                   ITensor              *batch_splits_out = nullptr,
+                   ITensor              *keeps            = nullptr,
+                   ITensor              *keeps_size       = nullptr,
+                   const BoxNMSLimitInfo info             = BoxNMSLimitInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref CPPDetectionOutputLayer
      *
      * @param[in] scores_in        The scores input tensor of size [count, num_classes]. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
@@ -81,9 +89,16 @@
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out,
-                           const ITensorInfo *classes,
-                           const ITensorInfo *batch_splits_out = nullptr, const ITensorInfo *keeps = nullptr, const ITensorInfo *keeps_size = nullptr, const BoxNMSLimitInfo info = BoxNMSLimitInfo());
+    static Status validate(const ITensorInfo    *scores_in,
+                           const ITensorInfo    *boxes_in,
+                           const ITensorInfo    *batch_splits_in,
+                           const ITensorInfo    *scores_out,
+                           const ITensorInfo    *boxes_out,
+                           const ITensorInfo    *classes,
+                           const ITensorInfo    *batch_splits_out = nullptr,
+                           const ITensorInfo    *keeps            = nullptr,
+                           const ITensorInfo    *keeps_size       = nullptr,
+                           const BoxNMSLimitInfo info             = BoxNMSLimitInfo());
     // Inherited methods overridden:
     void run() override;
 
diff --git a/arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h b/arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h
index f2c7ccc..dc8c8e7 100644
--- a/arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h
+++ b/arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h
@@ -24,9 +24,8 @@
 #ifndef ARM_COMPUTE_CPP_DETECTION_OUTPUT_LAYER_H
 #define ARM_COMPUTE_CPP_DETECTION_OUTPUT_LAYER_H
 
-#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
-
 #include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
 
 namespace arm_compute
 {
@@ -52,7 +51,11 @@
      *
      * @note Output contains all the detections. Of those, only the ones selected by the valid region are valid.
      */
-    void configure(const ITensor *input_loc, const ITensor *input_conf, const ITensor *input_priorbox, ITensor *output, DetectionOutputLayerInfo info = DetectionOutputLayerInfo());
+    void configure(const ITensor           *input_loc,
+                   const ITensor           *input_conf,
+                   const ITensor           *input_priorbox,
+                   ITensor                 *output,
+                   DetectionOutputLayerInfo info = DetectionOutputLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref CPPDetectionOutputLayer
      *
      * @param[in] input_loc      The mbox location input tensor info. Data types supported: F32.
@@ -63,7 +66,10 @@
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input_loc, const ITensorInfo *input_conf, const ITensorInfo *input_priorbox, const ITensorInfo *output,
+    static Status validate(const ITensorInfo       *input_loc,
+                           const ITensorInfo       *input_conf,
+                           const ITensorInfo       *input_priorbox,
+                           const ITensorInfo       *output,
                            DetectionOutputLayerInfo info = DetectionOutputLayerInfo());
     // Inherited methods overridden:
     void run() override;
@@ -82,12 +88,12 @@
     int _num_priors;
     int _num;
 
-    std::vector<LabelBBox> _all_location_predictions;
+    std::vector<LabelBBox>                         _all_location_predictions;
     std::vector<std::map<int, std::vector<float>>> _all_confidence_scores;
-    std::vector<BBox> _all_prior_bboxes;
-    std::vector<std::array<float, 4>> _all_prior_variances;
-    std::vector<LabelBBox> _all_decode_bboxes;
-    std::vector<std::map<int, std::vector<int>>> _all_indices;
+    std::vector<BBox>                              _all_prior_bboxes;
+    std::vector<std::array<float, 4>>              _all_prior_variances;
+    std::vector<LabelBBox>                         _all_decode_bboxes;
+    std::vector<std::map<int, std::vector<int>>>   _all_indices;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_CPP_DETECTION_OUTPUT_LAYER_H */
diff --git a/arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h b/arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h
index 94248ff..a40e4f9 100644
--- a/arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h
+++ b/arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h
@@ -24,10 +24,9 @@
 #ifndef ARM_COMPUTE_CPP_DETECTION_POSTPROCESS_H
 #define ARM_COMPUTE_CPP_DETECTION_POSTPROCESS_H
 
-#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
-
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/CPP/functions/CPPNonMaximumSuppression.h"
+#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
 #include "arm_compute/runtime/IMemoryManager.h"
 #include "arm_compute/runtime/MemoryGroup.h"
 #include "arm_compute/runtime/Tensor.h"
@@ -65,8 +64,14 @@
      *
      * @note Output contains all the detections. Of those, only the ones selected by the valid region are valid.
      */
-    void configure(const ITensor *input_box_encoding, const ITensor *input_score, const ITensor *input_anchors,
-                   ITensor *output_boxes, ITensor *output_classes, ITensor *output_scores, ITensor *num_detection, DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo());
+    void configure(const ITensor                *input_box_encoding,
+                   const ITensor                *input_score,
+                   const ITensor                *input_anchors,
+                   ITensor                      *output_boxes,
+                   ITensor                      *output_classes,
+                   ITensor                      *output_scores,
+                   ITensor                      *num_detection,
+                   DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref CPPDetectionPostProcessLayer
      *
      * @param[in]  input_box_encoding The bounding box input tensor info. Data types supported: F32/QASYMM8/QASYMM8_SIGNED.
@@ -80,8 +85,13 @@
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input_box_encoding, const ITensorInfo *input_class_score, const ITensorInfo *input_anchors,
-                           ITensorInfo *output_boxes, ITensorInfo *output_classes, ITensorInfo *output_scores, ITensorInfo *num_detection,
+    static Status validate(const ITensorInfo            *input_box_encoding,
+                           const ITensorInfo            *input_class_score,
+                           const ITensorInfo            *input_anchors,
+                           ITensorInfo                  *output_boxes,
+                           ITensorInfo                  *output_classes,
+                           ITensorInfo                  *output_scores,
+                           ITensorInfo                  *num_detection,
                            DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo());
     // Inherited methods overridden:
     void run() override;
diff --git a/arm_compute/runtime/CPP/functions/CPPNonMaximumSuppression.h b/arm_compute/runtime/CPP/functions/CPPNonMaximumSuppression.h
index 71c44a8..af6afc6 100644
--- a/arm_compute/runtime/CPP/functions/CPPNonMaximumSuppression.h
+++ b/arm_compute/runtime/CPP/functions/CPPNonMaximumSuppression.h
@@ -24,9 +24,8 @@
 #ifndef ARM_COMPUTE_CPP_NONMAXIMUMSUPPRESSION_LAYER_H
 #define ARM_COMPUTE_CPP_NONMAXIMUMSUPPRESSION_LAYER_H
 
-#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
-
 #include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
 
 namespace arm_compute
 {
@@ -48,7 +47,12 @@
      * @param[in]  nms_threshold   The threshold used in non maximum suppression.
      *
      */
-    void configure(const ITensor *bboxes, const ITensor *scores, ITensor *indices, unsigned int max_output_size, const float score_threshold, const float nms_threshold);
+    void configure(const ITensor *bboxes,
+                   const ITensor *scores,
+                   ITensor       *indices,
+                   unsigned int   max_output_size,
+                   const float    score_threshold,
+                   const float    nms_threshold);
 
     /** Static function to check if given arguments will lead to a valid configuration of @ref CPPNonMaximumSuppression
      *
@@ -60,8 +64,12 @@
      * @param[in]  nms_threshold   The threshold used in non maximum suppression.
      *
      */
-    static Status validate(const ITensorInfo *bboxes, const ITensorInfo *scores, const ITensorInfo *indices, unsigned int max_output_size,
-                           const float score_threshold, const float nms_threshold);
+    static Status validate(const ITensorInfo *bboxes,
+                           const ITensorInfo *scores,
+                           const ITensorInfo *indices,
+                           unsigned int       max_output_size,
+                           const float        score_threshold,
+                           const float        nms_threshold);
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_CPP_NONMAXIMUMSUPPRESSION_LAYER_H */
diff --git a/arm_compute/runtime/CPP/functions/CPPPermute.h b/arm_compute/runtime/CPP/functions/CPPPermute.h
index 85c1502..232da41 100644
--- a/arm_compute/runtime/CPP/functions/CPPPermute.h
+++ b/arm_compute/runtime/CPP/functions/CPPPermute.h
@@ -24,9 +24,8 @@
 #ifndef ARM_COMPUTE_CPPPERMUTE_H
 #define ARM_COMPUTE_CPPPERMUTE_H
 
-#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
-
 #include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
 
 namespace arm_compute
 {
@@ -53,5 +52,5 @@
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm);
 };
-}
+} // namespace arm_compute
 #endif /* ARM_COMPUTE_CPPPERMUTE_H */
diff --git a/arm_compute/runtime/CPP/functions/CPPSplit.h b/arm_compute/runtime/CPP/functions/CPPSplit.h
index 56aad2d..9be081f 100644
--- a/arm_compute/runtime/CPP/functions/CPPSplit.h
+++ b/arm_compute/runtime/CPP/functions/CPPSplit.h
@@ -29,7 +29,6 @@
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
-
 #include "arm_compute/runtime/IFunction.h"
 
 namespace arm_compute
@@ -39,8 +38,7 @@
 class CPPSplit : public IFunction
 {
 public:
-    CPPSplit()
-        : _outputs_vector(), _slice_functions(), _num_outputs(0)
+    CPPSplit() : _outputs_vector(), _slice_functions(), _num_outputs(0)
     {
     }
     /** Static function to check if given info will lead to a valid configuration of @ref CPPSplit
@@ -64,14 +62,16 @@
         unsigned int total_output_shape_size = 0;
 
         // Sum the output sizes and fall back to evenly-sized splits if any are zero
-        const bool using_split_shapes = std::none_of(outputs.begin(), outputs.end(), [&total_output_shape_size](ITensorInfo * info)
-        {
-            unsigned int output_shape_size = info->tensor_shape().total_size();
-            total_output_shape_size += output_shape_size;
-            return output_shape_size == 0;
-        });
+        const bool using_split_shapes = std::none_of(outputs.begin(), outputs.end(),
+                                                     [&total_output_shape_size](ITensorInfo *info)
+                                                     {
+                                                         unsigned int output_shape_size =
+                                                             info->tensor_shape().total_size();
+                                                         total_output_shape_size += output_shape_size;
+                                                         return output_shape_size == 0;
+                                                     });
 
-        if(using_split_shapes)
+        if (using_split_shapes)
         {
             ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().total_size() != total_output_shape_size);
         }
@@ -83,10 +83,10 @@
 
         // Validate output tensors
         unsigned int axis_offset = 0;
-        for(const auto &output : outputs)
+        for (const auto &output : outputs)
         {
             ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
-            if(using_split_shapes)
+            if (using_split_shapes)
             {
                 output_shape = output->tensor_shape();
                 ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() == 0);
@@ -97,14 +97,14 @@
             // Start/End coordinates
             Coordinates start_coords;
             Coordinates end_coords;
-            for(unsigned int d = 0; d < output_shape.num_dimensions(); ++d)
+            for (unsigned int d = 0; d < output_shape.num_dimensions(); ++d)
             {
                 end_coords.set(d, -1);
             }
 
             // Output auto inizialitation if not yet initialized
             TensorInfo tmp_output_info = *output->clone();
-            if(tmp_output_info.tensor_shape().total_size() == 0)
+            if (tmp_output_info.tensor_shape().total_size() == 0)
             {
                 tmp_output_info = input->clone()->set_is_resizable(true).set_tensor_shape(output_shape);
             }
@@ -128,7 +128,8 @@
      *                     from the split dimension.
      * @param[in]  axis    Axis on which to split the input.
      */
-    void configure(const TensorInterfaceType *input, const std::vector<TensorInterfaceType *> &outputs, unsigned int axis)
+    void
+    configure(const TensorInterfaceType *input, const std::vector<TensorInterfaceType *> &outputs, unsigned int axis)
     {
         // Create Slice functions
         _num_outputs = outputs.size();
@@ -136,17 +137,16 @@
 
         // Extract output tensor info
         std::vector<ITensorInfo *> outputs_info;
-        for(auto &output : outputs)
+        for (auto &output : outputs)
         {
             ARM_COMPUTE_ERROR_ON_NULLPTR(output);
             outputs_info.emplace_back(output->info());
         }
 
         // If any of the outputs have a zero size, fall-back to using evenly-sized output splits
-        const bool outputs_have_sizes = std::none_of(outputs_info.begin(), outputs_info.end(), [](ITensorInfo * info)
-        {
-            return info->tensor_shape().total_size() == 0;
-        });
+        const bool outputs_have_sizes =
+            std::none_of(outputs_info.begin(), outputs_info.end(),
+                         [](ITensorInfo *info) { return info->tensor_shape().total_size() == 0; });
 
         // Validate
         ARM_COMPUTE_ERROR_THROW_ON(CPPSplit::validate(input->info(), outputs_info, axis));
@@ -154,12 +154,13 @@
         unsigned int axis_offset = 0;
         unsigned int i           = 0;
 
-        for(const auto &output_info : outputs_info)
+        for (const auto &output_info : outputs_info)
         {
             // Get output shape
-            TensorShape output_shape = (outputs_have_sizes ?
-                                        output_info->tensor_shape() :
-                                        arm_compute::misc::shape_calculator::compute_split_shape(input->info(), axis, _num_outputs));
+            TensorShape output_shape =
+                (outputs_have_sizes
+                     ? output_info->tensor_shape()
+                     : arm_compute::misc::shape_calculator::compute_split_shape(input->info(), axis, _num_outputs));
 
             const size_t axis_split_step = output_shape[axis];
 
@@ -167,7 +168,7 @@
             Coordinates start_coords;
             Coordinates end_coords;
 
-            for(unsigned int d = 0; d < output_shape.num_dimensions(); ++d)
+            for (unsigned int d = 0; d < output_shape.num_dimensions(); ++d)
             {
                 end_coords.set(d, -1);
             }
diff --git a/arm_compute/runtime/CPP/functions/CPPTopKV.h b/arm_compute/runtime/CPP/functions/CPPTopKV.h
index 2f63084..232cbb3 100644
--- a/arm_compute/runtime/CPP/functions/CPPTopKV.h
+++ b/arm_compute/runtime/CPP/functions/CPPTopKV.h
@@ -24,9 +24,8 @@
 #ifndef ARM_COMPUTE_CPPTOPKV_H
 #define ARM_COMPUTE_CPPTOPKV_H
 
-#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
-
 #include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
 
 namespace arm_compute
 {
@@ -54,7 +53,8 @@
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *predictions, const ITensorInfo *targets, ITensorInfo *output, const unsigned int k);
+    static Status
+    validate(const ITensorInfo *predictions, const ITensorInfo *targets, ITensorInfo *output, const unsigned int k);
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_CPPTOPKV_H */
diff --git a/arm_compute/runtime/CPP/functions/CPPUpsample.h b/arm_compute/runtime/CPP/functions/CPPUpsample.h
index b97d4d1..3b0f997 100644
--- a/arm_compute/runtime/CPP/functions/CPPUpsample.h
+++ b/arm_compute/runtime/CPP/functions/CPPUpsample.h
@@ -24,9 +24,8 @@
 #ifndef ARM_COMPUTE_CPPUPSAMPLE_H
 #define ARM_COMPUTE_CPPUPSAMPLE_H
 
-#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
-
 #include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
 
 namespace arm_compute
 {
@@ -44,5 +43,5 @@
      */
     void configure(const ITensor *input, ITensor *output, const PadStrideInfo &info);
 };
-}
+} // namespace arm_compute
 #endif /* ARM_COMPUTE_CPPUPSAMPLE_H */