Apply clang-format to the repository

Code is formatted according to a revised clang-format configuration
file (not part of this delivery), using clang-format version 14.0.6.

Exclusion list:
- files with the .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, SConscript, ...)
and the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up to format the .cl files and the files
under tests/ and compute_kernel_writer/validation/.
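
For reference, the options below sketch a .clang-format that would
reproduce the style seen in the hunks that follow (right-aligned
pointers, aligned consecutive declarations and assignments, one
parameter per line once a signature overflows the column limit, a
space after control-flow keywords, no padding inside braced
initializers, case-insensitive include sorting, Allman braces). This
is an illustrative reconstruction inferred from the diff, not the
delivered configuration file; every value here is an assumption.

    # Illustrative sketch only: inferred from the diff, not the delivered .clang-format
    Language:                     Cpp
    BasedOnStyle:                 LLVM
    ColumnLimit:                  120
    IndentWidth:                  4
    BreakBeforeBraces:            Allman
    PointerAlignment:             Right
    AlignConsecutiveAssignments:  true
    AlignConsecutiveDeclarations: true
    BinPackArguments:             false
    BinPackParameters:            false
    SpaceBeforeParens:            ControlStatements
    Cpp11BracedListStyle:         true
    SortIncludes:                 CaseInsensitive
    BreakConstructorInitializers: BeforeColon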

Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
diff --git a/arm_compute/graph/Edge.h b/arm_compute/graph/Edge.h
index 5e81b9c..7f5075d 100644
--- a/arm_compute/graph/Edge.h
+++ b/arm_compute/graph/Edge.h
@@ -48,8 +48,18 @@
      * @param[in] consumer_idx Consumer node input index
      * @param[in] tensor       Tensor associated with the edge
      */
-    Edge(EdgeID id, INode *producer, unsigned int producer_idx, INode *consumer, unsigned int consumer_idx, Tensor *tensor)
-        : _id(id), _producer(producer), _consumer(consumer), _producer_idx(producer_idx), _consumer_idx(consumer_idx), _tensor(tensor)
+    Edge(EdgeID       id,
+         INode       *producer,
+         unsigned int producer_idx,
+         INode       *consumer,
+         unsigned int consumer_idx,
+         Tensor      *tensor)
+        : _id(id),
+          _producer(producer),
+          _consumer(consumer),
+          _producer_idx(producer_idx),
+          _consumer_idx(consumer_idx),
+          _tensor(tensor)
 
     {
     }
diff --git a/arm_compute/graph/Graph.h b/arm_compute/graph/Graph.h
index 806d84c..e6e173f 100644
--- a/arm_compute/graph/Graph.h
+++ b/arm_compute/graph/Graph.h
@@ -79,7 +79,7 @@
      * @return ID of the node
      */
     template <typename NT, typename... Ts>
-    NodeID add_node(Ts &&... args);
+    NodeID add_node(Ts &&...args);
     /** Remove the node with the given ID
      *
      * @param[in] nid ID of the node to remove
@@ -221,17 +221,17 @@
     TensorID create_tensor(const TensorDescriptor &desc = TensorDescriptor());
 
 private:
-    GraphID                              _id      = GraphID(0); /**< Graph id */
-    std::string                          _name    = {};         /**< Graph name */
-    std::vector<std::unique_ptr<INode>>  _nodes   = {};         /**< Graph nodes */
-    std::vector<std::unique_ptr<Edge>>   _edges   = {};         /**< Graph edges */
-    std::vector<std::unique_ptr<Tensor>> _tensors = {};         /**< Graph tensors */
+    GraphID                                 _id           = GraphID(0); /**< Graph id */
+    std::string                             _name         = {};         /**< Graph name */
+    std::vector<std::unique_ptr<INode>>     _nodes        = {};         /**< Graph nodes */
+    std::vector<std::unique_ptr<Edge>>      _edges        = {};         /**< Graph edges */
+    std::vector<std::unique_ptr<Tensor>>    _tensors      = {};         /**< Graph tensors */
     std::map<NodeType, std::vector<NodeID>> _tagged_nodes = {}; /**< Graph nodes map with the node type as key */
-    arm_compute::Mutex _mtx = {};                               /**< Mutex used for graph construction */
+    arm_compute::Mutex                      _mtx          = {}; /**< Mutex used for graph construction */
 };
 
 template <typename NT, typename... Ts>
-inline NodeID Graph::add_node(Ts &&... args)
+inline NodeID Graph::add_node(Ts &&...args)
 {
     arm_compute::lock_guard<arm_compute::Mutex> lock(_mtx);
 
@@ -245,7 +245,7 @@
     _tagged_nodes[node->type()].push_back(nid);
 
     // Associate a new tensor with each output
-    for(auto &output : node->_outputs)
+    for (auto &output : node->_outputs)
     {
         output = create_tensor();
     }
diff --git a/arm_compute/graph/GraphBuilder.h b/arm_compute/graph/GraphBuilder.h
index cb88c0e..118d06b 100644
--- a/arm_compute/graph/GraphBuilder.h
+++ b/arm_compute/graph/GraphBuilder.h
@@ -51,7 +51,8 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_const_node(Graph &g, NodeParams params, const TensorDescriptor &desc, ITensorAccessorUPtr accessor = nullptr);
+    static NodeID
+    add_const_node(Graph &g, NodeParams params, const TensorDescriptor &desc, ITensorAccessorUPtr accessor = nullptr);
     /** Adds an input layer node to the graph
      *
      * @param[in] g        Graph to add the node to
@@ -61,7 +62,8 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_input_node(Graph &g, NodeParams params, const TensorDescriptor &desc, ITensorAccessorUPtr accessor = nullptr);
+    static NodeID
+    add_input_node(Graph &g, NodeParams params, const TensorDescriptor &desc, ITensorAccessorUPtr accessor = nullptr);
     /** Adds an output layer node to the graph
      *
      * @param[in] g        Graph to add the node to
@@ -71,7 +73,8 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_output_node(Graph &g, NodeParams params, NodeIdxPair input, ITensorAccessorUPtr accessor = nullptr);
+    static NodeID
+    add_output_node(Graph &g, NodeParams params, NodeIdxPair input, ITensorAccessorUPtr accessor = nullptr);
     /** Adds an activation layer node to the graph
      *
      * @param[in] g              Graph to add the node to
@@ -82,7 +85,10 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_activation_node(Graph &g, NodeParams params, NodeIdxPair input, ActivationLayerInfo act_info,
+    static NodeID add_activation_node(Graph                  &g,
+                                      NodeParams              params,
+                                      NodeIdxPair             input,
+                                      ActivationLayerInfo     act_info,
                                       const QuantizationInfo &out_quant_info = QuantizationInfo());
     /** Adds an activation layer node to the graph
      *
@@ -96,7 +102,11 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_arg_min_max_node(Graph &g, NodeParams params, NodeIdxPair input, ReductionOperation op, unsigned int axis,
+    static NodeID add_arg_min_max_node(Graph                  &g,
+                                       NodeParams              params,
+                                       NodeIdxPair             input,
+                                       ReductionOperation      op,
+                                       unsigned int            axis,
                                        DataType                out_data_type  = DataType::UNKNOWN,
                                        const QuantizationInfo &out_quant_info = QuantizationInfo());
     /** Adds a batch normalization layer node to the graph
@@ -112,9 +122,14 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_batch_normalization_node(Graph &g, NodeParams params, NodeIdxPair input, float epsilon,
-                                               ITensorAccessorUPtr mean_accessor = nullptr, ITensorAccessorUPtr var_accessor = nullptr,
-                                               ITensorAccessorUPtr beta_accessor = nullptr, ITensorAccessorUPtr gamma_accessor = nullptr);
+    static NodeID add_batch_normalization_node(Graph              &g,
+                                               NodeParams          params,
+                                               NodeIdxPair         input,
+                                               float               epsilon,
+                                               ITensorAccessorUPtr mean_accessor  = nullptr,
+                                               ITensorAccessorUPtr var_accessor   = nullptr,
+                                               ITensorAccessorUPtr beta_accessor  = nullptr,
+                                               ITensorAccessorUPtr gamma_accessor = nullptr);
     /** Adds a bounding box transform layer node to the graph
      *
      * @param[in] g      Graph to add the node to
@@ -125,7 +140,8 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_bounding_box_transform_node(Graph &g, NodeParams params, NodeIdxPair input, NodeIdxPair deltas, BoundingBoxTransformInfo info);
+    static NodeID add_bounding_box_transform_node(
+        Graph &g, NodeParams params, NodeIdxPair input, NodeIdxPair deltas, BoundingBoxTransformInfo info);
     /** Adds an channel shuffle layer node to the graph
      *
      * @param[in] g          Graph to add the node to
@@ -154,10 +170,17 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
-                                       Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo conv_info, unsigned int num_groups = 1,
-                                       ConvolutionMethod method = ConvolutionMethod::Default, FastMathHint fast_math_hint = FastMathHint::Disabled,
-                                       ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr,
+    static NodeID add_convolution_node(Graph                  &g,
+                                       NodeParams              params,
+                                       NodeIdxPair             input,
+                                       Size2D                  kernel_spatial_extend,
+                                       unsigned int            depth,
+                                       PadStrideInfo           conv_info,
+                                       unsigned int            num_groups         = 1,
+                                       ConvolutionMethod       method             = ConvolutionMethod::Default,
+                                       FastMathHint            fast_math_hint     = FastMathHint::Disabled,
+                                       ITensorAccessorUPtr     weights_accessor   = nullptr,
+                                       ITensorAccessorUPtr     bias_accessor      = nullptr,
                                        const QuantizationInfo &weights_quant_info = QuantizationInfo(),
                                        const QuantizationInfo &out_quant_info     = QuantizationInfo());
     /** Adds a deconvolution layer node to the graph
@@ -173,9 +196,14 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_deconvolution_node(Graph &g, NodeParams params, NodeIdxPair input,
-                                         Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo deconv_info,
-                                         ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr);
+    static NodeID add_deconvolution_node(Graph              &g,
+                                         NodeParams          params,
+                                         NodeIdxPair         input,
+                                         Size2D              kernel_spatial_extend,
+                                         unsigned int        depth,
+                                         PadStrideInfo       deconv_info,
+                                         ITensorAccessorUPtr weights_accessor = nullptr,
+                                         ITensorAccessorUPtr bias_accessor    = nullptr);
     /** Adds a depth concatenate node to the graph
      *
      * @param[in] g                 Graph to add the node to
@@ -185,7 +213,10 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_concatenate_node(Graph &g, NodeParams params, const std::vector<NodeIdxPair> &inputs, const descriptors::ConcatLayerDescriptor &concat_descriptor);
+    static NodeID add_concatenate_node(Graph                                    &g,
+                                       NodeParams                                params,
+                                       const std::vector<NodeIdxPair>           &inputs,
+                                       const descriptors::ConcatLayerDescriptor &concat_descriptor);
     /** Adds an depth to space layer node to the graph
      *
      * @param[in] g           Graph to add the node to
@@ -212,11 +243,18 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_depthwise_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
-                                                 Size2D kernel_spatial_extend, PadStrideInfo conv_info, int depth_multiplier = 1,
-                                                 DepthwiseConvolutionMethod method    = DepthwiseConvolutionMethod::Default,
-                                                 ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr, const QuantizationInfo &quant_info = QuantizationInfo(),
-                                                 const QuantizationInfo &out_quant_info = QuantizationInfo());
+    static NodeID
+    add_depthwise_convolution_node(Graph                     &g,
+                                   NodeParams                 params,
+                                   NodeIdxPair                input,
+                                   Size2D                     kernel_spatial_extend,
+                                   PadStrideInfo              conv_info,
+                                   int                        depth_multiplier = 1,
+                                   DepthwiseConvolutionMethod method           = DepthwiseConvolutionMethod::Default,
+                                   ITensorAccessorUPtr        weights_accessor = nullptr,
+                                   ITensorAccessorUPtr        bias_accessor    = nullptr,
+                                   const QuantizationInfo    &quant_info       = QuantizationInfo(),
+                                   const QuantizationInfo    &out_quant_info   = QuantizationInfo());
     /** Adds an element-wise layer node to the graph
      *
      * @param[in] g         Graph to add the node to
@@ -227,7 +265,8 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_elementwise_node(Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, EltwiseOperation operation);
+    static NodeID add_elementwise_node(
+        Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, EltwiseOperation operation);
     /** Adds a dequantization node to the graph
      *
      * @param[in] g      Graph to add the node to
@@ -248,7 +287,12 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_detection_output_node(Graph &g, NodeParams params, NodeIdxPair input_loc, NodeIdxPair input_conf, NodeIdxPair input_priorbox, const DetectionOutputLayerInfo &detect_info);
+    static NodeID add_detection_output_node(Graph                          &g,
+                                            NodeParams                      params,
+                                            NodeIdxPair                     input_loc,
+                                            NodeIdxPair                     input_conf,
+                                            NodeIdxPair                     input_priorbox,
+                                            const DetectionOutputLayerInfo &detect_info);
     /** Adds a detection post process layer node to the graph
      *
      * @param[in] g                      Graph to add the node to
@@ -261,8 +305,12 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_detection_post_process_node(Graph &g, NodeParams params, NodeIdxPair input_box_encoding, NodeIdxPair input_class_prediction,
-                                                  const DetectionPostProcessLayerInfo &detect_info, ITensorAccessorUPtr anchors_accessor = nullptr,
+    static NodeID add_detection_post_process_node(Graph                               &g,
+                                                  NodeParams                           params,
+                                                  NodeIdxPair                          input_box_encoding,
+                                                  NodeIdxPair                          input_class_prediction,
+                                                  const DetectionPostProcessLayerInfo &detect_info,
+                                                  ITensorAccessorUPtr                  anchors_accessor = nullptr,
                                                   const QuantizationInfo &anchor_quant_info = QuantizationInfo());
     /** Adds a Dummy node to the graph
      *
@@ -299,8 +347,12 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_fully_connected_layer(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_outputs,
-                                            NodeID weights_nid, NodeID bias_nid = EmptyNodeID,
+    static NodeID add_fully_connected_layer(Graph                        &g,
+                                            NodeParams                    params,
+                                            NodeIdxPair                   input,
+                                            unsigned int                  num_outputs,
+                                            NodeID                        weights_nid,
+                                            NodeID                        bias_nid       = EmptyNodeID,
                                             const FullyConnectedLayerInfo fc_info        = FullyConnectedLayerInfo(),
                                             const QuantizationInfo       &out_quant_info = QuantizationInfo(),
                                             FastMathHint                  fast_math_hint = FastMathHint::Disabled);
@@ -319,9 +371,13 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_fully_connected_layer(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_outputs,
-                                            ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr,
-                                            const FullyConnectedLayerInfo fc_info            = FullyConnectedLayerInfo(),
+    static NodeID add_fully_connected_layer(Graph                        &g,
+                                            NodeParams                    params,
+                                            NodeIdxPair                   input,
+                                            unsigned int                  num_outputs,
+                                            ITensorAccessorUPtr           weights_accessor = nullptr,
+                                            ITensorAccessorUPtr           bias_accessor    = nullptr,
+                                            const FullyConnectedLayerInfo fc_info          = FullyConnectedLayerInfo(),
                                             const QuantizationInfo       &weights_quant_info = QuantizationInfo(),
                                             const QuantizationInfo       &out_quant_info     = QuantizationInfo(),
                                             FastMathHint                  fast_math_hint     = FastMathHint::Disabled);
@@ -336,8 +392,12 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_generate_proposals_node(Graph &g, NodeParams params, NodeIdxPair scores, NodeIdxPair deltas,
-                                              NodeIdxPair anchors, GenerateProposalsInfo info);
+    static NodeID add_generate_proposals_node(Graph                &g,
+                                              NodeParams            params,
+                                              NodeIdxPair           scores,
+                                              NodeIdxPair           deltas,
+                                              NodeIdxPair           anchors,
+                                              GenerateProposalsInfo info);
     /** Adds a L2 Normalize layer node to the graph
      *
      * @param[in] g       Graph to add the node to
@@ -358,7 +418,8 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_normalization_node(Graph &g, NodeParams params, NodeIdxPair input, NormalizationLayerInfo norm_info);
+    static NodeID
+    add_normalization_node(Graph &g, NodeParams params, NodeIdxPair input, NormalizationLayerInfo norm_info);
     /** Adds a normalize planar YUV layer node to the graph
      *
      * @param[in] g             Graph to add the node to
@@ -369,8 +430,11 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_normalize_planar_yuv_node(Graph &g, NodeParams params, NodeIdxPair input,
-                                                ITensorAccessorUPtr mean_accessor = nullptr, ITensorAccessorUPtr std_accessor = nullptr);
+    static NodeID add_normalize_planar_yuv_node(Graph              &g,
+                                                NodeParams          params,
+                                                NodeIdxPair         input,
+                                                ITensorAccessorUPtr mean_accessor = nullptr,
+                                                ITensorAccessorUPtr std_accessor  = nullptr);
     /** Adds a pad layer node to the graph
      *
      * @param[in] g         Graph to add the node to
@@ -382,7 +446,11 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_pad_node(Graph &g, NodeParams params, NodeIdxPair input, const PaddingList &paddings, PixelValue pad_value = PixelValue());
+    static NodeID add_pad_node(Graph             &g,
+                               NodeParams         params,
+                               NodeIdxPair        input,
+                               const PaddingList &paddings,
+                               PixelValue         pad_value = PixelValue());
     /** Adds a permute layer node to the graph
      *
      * @param[in] g      Graph to add the node to
@@ -394,7 +462,11 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_permute_node(Graph &g, NodeParams params, NodeIdxPair input, PermutationVector perm, DataLayout layout = DataLayout::UNKNOWN);
+    static NodeID add_permute_node(Graph            &g,
+                                   NodeParams        params,
+                                   NodeIdxPair       input,
+                                   PermutationVector perm,
+                                   DataLayout        layout = DataLayout::UNKNOWN);
     /** Adds a pooling layer node to the graph
      *
      * @param[in] g         Graph to add the node to
@@ -426,8 +498,12 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_print_node(Graph &g, NodeParams params, NodeIdxPair input, std::ostream &stream, const IOFormatInfo &format_info = IOFormatInfo(),
-                                 const std::function<ITensor *(ITensor *)> transform = nullptr);
+    static NodeID add_print_node(Graph                                    &g,
+                                 NodeParams                                params,
+                                 NodeIdxPair                               input,
+                                 std::ostream                             &stream,
+                                 const IOFormatInfo                       &format_info = IOFormatInfo(),
+                                 const std::function<ITensor *(ITensor *)> transform   = nullptr);
     /** Adds a priorbox layer node to the graph
      *
      * @param[in] g          Graph to add the node to
@@ -438,7 +514,8 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_priorbox_node(Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, const PriorBoxLayerInfo &prior_info);
+    static NodeID add_priorbox_node(
+        Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, const PriorBoxLayerInfo &prior_info);
     /** Adds a quantization layer node to the graph
      *
      * @param[in] g              Graph to add the node to
@@ -448,7 +525,8 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_quantization_node(Graph &g, NodeParams params, NodeIdxPair input, const QuantizationInfo &out_quant_info);
+    static NodeID
+    add_quantization_node(Graph &g, NodeParams params, NodeIdxPair input, const QuantizationInfo &out_quant_info);
     /** Adds a reduction sum layer node to the graph
      *
      * @param[in] g         Graph to add the node to
@@ -460,7 +538,8 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_reduction_operation_node(Graph &g, NodeParams params, NodeIdxPair input, ReductionOperation op, int axis, bool keep_dims = true);
+    static NodeID add_reduction_operation_node(
+        Graph &g, NodeParams params, NodeIdxPair input, ReductionOperation op, int axis, bool keep_dims = true);
     /** Adds a reorg layer node to the graph
      *
      * @param[in] g      Graph to add the node to
@@ -492,7 +571,12 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_resize_node(Graph &g, NodeParams params, NodeIdxPair input, InterpolationPolicy policy, float width_scale, float height_scale);
+    static NodeID add_resize_node(Graph              &g,
+                                  NodeParams          params,
+                                  NodeIdxPair         input,
+                                  InterpolationPolicy policy,
+                                  float               width_scale,
+                                  float               height_scale);
     /** Adds a ROI align layer node to the graph
      *
      * @param[in] g         Graph to add the node to
@@ -503,7 +587,8 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_roi_align_node(Graph &g, NodeParams params, NodeIdxPair input, NodeIdxPair rois, ROIPoolingLayerInfo pool_info);
+    static NodeID
+    add_roi_align_node(Graph &g, NodeParams params, NodeIdxPair input, NodeIdxPair rois, ROIPoolingLayerInfo pool_info);
     /** Adds a scale layer node to the graph
      * This layer computes a product of the input with a scale (read from mul_accessor) and it applies an offset (read from add_accessor).
      * output = input * mul_w + add_w
@@ -516,8 +601,11 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_scale_layer(Graph &g, const NodeParams &params, NodeIdxPair input,
-                                  ITensorAccessorUPtr mul_accessor = nullptr, ITensorAccessorUPtr add_accessor = nullptr);
+    static NodeID add_scale_layer(Graph              &g,
+                                  const NodeParams   &params,
+                                  NodeIdxPair         input,
+                                  ITensorAccessorUPtr mul_accessor = nullptr,
+                                  ITensorAccessorUPtr add_accessor = nullptr);
     /** Adds a softmax node to the graph
      *
      * @param[in] g      Graph to add the node to
@@ -538,7 +626,8 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_slice_node(Graph &g, NodeParams params, NodeIdxPair input, Coordinates &starts, Coordinates &ends);
+    static NodeID
+    add_slice_node(Graph &g, NodeParams params, NodeIdxPair input, Coordinates &starts, Coordinates &ends);
     /** Adds a split node to the graph
      *
      * @param[in] g          Graph to add the node to
@@ -549,7 +638,8 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_split_node(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_splits, unsigned int axis = 0);
+    static NodeID
+    add_split_node(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_splits, unsigned int axis = 0);
     /** Adds a stack layer node to the graph
      *
      * @param[in] g      Graph to add the node to
@@ -572,7 +662,13 @@
      *
      * @return Node ID of the created node, EmptyNodeID in case of error
      */
-    static NodeID add_strided_slice_node(Graph &g, NodeParams params, NodeIdxPair input, Coordinates &starts, Coordinates &ends, BiStrides &strides, StridedSliceLayerInfo info);
+    static NodeID add_strided_slice_node(Graph                &g,
+                                         NodeParams            params,
+                                         NodeIdxPair           input,
+                                         Coordinates          &starts,
+                                         Coordinates          &ends,
+                                         BiStrides            &strides,
+                                         StridedSliceLayerInfo info);
     /** Adds a yolo layer to the graph
      *
      * @param[in] g        Graph to add the node to
diff --git a/arm_compute/graph/GraphContext.h b/arm_compute/graph/GraphContext.h
index 7beb598..68fbaf5 100644
--- a/arm_compute/graph/GraphContext.h
+++ b/arm_compute/graph/GraphContext.h
@@ -25,7 +25,6 @@
 #define ARM_COMPUTE_GRAPH_GRAPH_CONTEXT_H
 
 #include "arm_compute/graph/Types.h"
-
 #include "arm_compute/runtime/IMemoryManager.h"
 #include "arm_compute/runtime/IWeightsManager.h"
 
@@ -39,18 +38,18 @@
 /** Contains structs required for memory management */
 struct MemoryManagerContext
 {
-    Target                                       target      = { Target::UNSPECIFIED }; /**< Target */
-    std::shared_ptr<arm_compute::IMemoryManager> intra_mm    = { nullptr };             /**< Intra-function memory manager */
-    std::shared_ptr<arm_compute::IMemoryManager> cross_mm    = { nullptr };             /**< Cross-function memory manager */
-    std::shared_ptr<arm_compute::IMemoryGroup>   cross_group = { nullptr };             /**< Cross-function memory group */
-    IAllocator                                  *allocator   = { nullptr };             /**< Backend allocator to use */
+    Target                                       target      = {Target::UNSPECIFIED}; /**< Target */
+    std::shared_ptr<arm_compute::IMemoryManager> intra_mm    = {nullptr}; /**< Intra-function memory manager */
+    std::shared_ptr<arm_compute::IMemoryManager> cross_mm    = {nullptr}; /**< Cross-function memory manager */
+    std::shared_ptr<arm_compute::IMemoryGroup>   cross_group = {nullptr}; /**< Cross-function memory group */
+    IAllocator                                  *allocator   = {nullptr}; /**< Backend allocator to use */
 };
 
 /** Contains structs required for weights management */
 struct WeightsManagerContext
 {
-    Target                                        target = { Target::UNSPECIFIED }; /**< Target */
-    std::shared_ptr<arm_compute::IWeightsManager> wm     = { nullptr };             /**< Weights manager */
+    Target                                        target = {Target::UNSPECIFIED}; /**< Target */
+    std::shared_ptr<arm_compute::IWeightsManager> wm     = {nullptr};             /**< Weights manager */
 };
 
 /** Graph context **/
@@ -125,7 +124,7 @@
     void finalize();
 
 private:
-    GraphConfig _config;                                       /**< Graph configuration */
+    GraphConfig                             _config;           /**< Graph configuration */
     std::map<Target, MemoryManagerContext>  _memory_managers;  /**< Memory managers for each target */
     std::map<Target, WeightsManagerContext> _weights_managers; /**< Weights managers for each target */
 };
diff --git a/arm_compute/graph/IDeviceBackend.h b/arm_compute/graph/IDeviceBackend.h
index f84aac0..8ae92e3 100644
--- a/arm_compute/graph/IDeviceBackend.h
+++ b/arm_compute/graph/IDeviceBackend.h
@@ -88,7 +88,8 @@
      *
      * @return Backend sub-tensor handle
      */
-    virtual std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) = 0;
+    virtual std::unique_ptr<ITensorHandle>
+    create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) = 0;
     /** Configure a backend Node
      *
      * @note This creates an appropriate configured backend function for the given node
diff --git a/arm_compute/graph/LayerDescriptors.h b/arm_compute/graph/LayerDescriptors.h
index c11174f..d632ed9 100644
--- a/arm_compute/graph/LayerDescriptors.h
+++ b/arm_compute/graph/LayerDescriptors.h
@@ -37,8 +37,7 @@
 struct ConcatLayerDescriptor
 {
     /** Default constructor */
-    ConcatLayerDescriptor()
-        : axis(DataLayoutDimension::CHANNEL), output_qinfo()
+    ConcatLayerDescriptor() : axis(DataLayoutDimension::CHANNEL), output_qinfo()
     {
     }
 
@@ -46,8 +45,7 @@
      *
      * @param[in] axis Axis.
      */
-    ConcatLayerDescriptor(DataLayoutDimension axis)
-        : axis(axis), output_qinfo()
+    ConcatLayerDescriptor(DataLayoutDimension axis) : axis(axis), output_qinfo()
     {
     }
 
@@ -76,9 +74,16 @@
      * @param[in] r_policy         (Optional) Rounding policy used for the operation. Defaults to @ref RoundingPolicy::TO_ZERO
      * @param[in] fused_activation (Optional) Fused activation information. Defaults to empty (identity) @ref ActivationLayerInfo
      */
-    EltwiseLayerDescriptor(EltwiseOperation op, QuantizationInfo out_quant_info = QuantizationInfo(), ConvertPolicy c_policy = ConvertPolicy::SATURATE, RoundingPolicy r_policy = RoundingPolicy::TO_ZERO,
+    EltwiseLayerDescriptor(EltwiseOperation    op,
+                           QuantizationInfo    out_quant_info   = QuantizationInfo(),
+                           ConvertPolicy       c_policy         = ConvertPolicy::SATURATE,
+                           RoundingPolicy      r_policy         = RoundingPolicy::TO_ZERO,
                            ActivationLayerInfo fused_activation = ActivationLayerInfo())
-        : op(op), out_quant_info(out_quant_info), c_policy(c_policy), r_policy(r_policy), fused_activation(fused_activation)
+        : op(op),
+          out_quant_info(out_quant_info),
+          c_policy(c_policy),
+          r_policy(r_policy),
+          fused_activation(fused_activation)
     {
     }
 
@@ -100,10 +105,16 @@
      * @param[in] r_policy         (Optional) Rounding policy used for the operation. Defaults to @ref RoundingPolicy::TO_ZERO
      * @param[in] fused_activation (Optional) Fused activation information. Defaults to empty (identity) @ref ActivationLayerInfo
      */
-    UnaryEltwiseLayerDescriptor(UnaryEltwiseOperation op, QuantizationInfo out_quant_info = QuantizationInfo(), ConvertPolicy c_policy = ConvertPolicy::SATURATE,
-                                RoundingPolicy      r_policy         = RoundingPolicy::TO_ZERO,
-                                ActivationLayerInfo fused_activation = ActivationLayerInfo())
-        : op(op), out_quant_info(out_quant_info), c_policy(c_policy), r_policy(r_policy), fused_activation(fused_activation)
+    UnaryEltwiseLayerDescriptor(UnaryEltwiseOperation op,
+                                QuantizationInfo      out_quant_info   = QuantizationInfo(),
+                                ConvertPolicy         c_policy         = ConvertPolicy::SATURATE,
+                                RoundingPolicy        r_policy         = RoundingPolicy::TO_ZERO,
+                                ActivationLayerInfo   fused_activation = ActivationLayerInfo())
+        : op(op),
+          out_quant_info(out_quant_info),
+          c_policy(c_policy),
+          r_policy(r_policy),
+          fused_activation(fused_activation)
     {
     }
 
@@ -130,7 +141,7 @@
     PadStrideInfo    info;           /**< Padding and stride information */
     QuantizationInfo out_quant_info; /**< Output quantization information */
 };
-} // namespace descriptor
+} // namespace descriptors
 } // namespace graph
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_LAYER_DESCRIPTORS_H */
\ No newline at end of file
+#endif /* ARM_COMPUTE_LAYER_DESCRIPTORS_H */
diff --git a/arm_compute/graph/Logger.h b/arm_compute/graph/Logger.h
index 872c650..e83d5f4 100644
--- a/arm_compute/graph/Logger.h
+++ b/arm_compute/graph/Logger.h
@@ -31,14 +31,14 @@
  *
  * @note It will eventually create all default loggers in don't exist
  */
-#define ARM_COMPUTE_CREATE_DEFAULT_GRAPH_LOGGER()                                  \
-    do                                                                             \
-    {                                                                              \
-        if(arm_compute::logging::LoggerRegistry::get().logger("GRAPH") == nullptr) \
-        {                                                                          \
-            arm_compute::logging::LoggerRegistry::get().create_reserved_loggers(); \
-        }                                                                          \
-    } while(false)
+#define ARM_COMPUTE_CREATE_DEFAULT_GRAPH_LOGGER()                                   \
+    do                                                                              \
+    {                                                                               \
+        if (arm_compute::logging::LoggerRegistry::get().logger("GRAPH") == nullptr) \
+        {                                                                           \
+            arm_compute::logging::LoggerRegistry::get().create_reserved_loggers();  \
+        }                                                                           \
+    } while (false)
 #else /* ARM_COMPUTE_LOGGING_ENABLED */
 #define ARM_COMPUTE_CREATE_DEFAULT_GRAPH_LOGGER()
 #endif /* ARM_COMPUTE_LOGGING_ENABLED */
diff --git a/arm_compute/graph/Tensor.h b/arm_compute/graph/Tensor.h
index de96c99..0ffae28 100644
--- a/arm_compute/graph/Tensor.h
+++ b/arm_compute/graph/Tensor.h
@@ -24,11 +24,10 @@
 #ifndef ARM_COMPUTE_GRAPH_TENSOR_H
 #define ARM_COMPUTE_GRAPH_TENSOR_H
 
-#include "arm_compute/graph/Types.h"
-
 #include "arm_compute/graph/ITensorAccessor.h"
 #include "arm_compute/graph/ITensorHandle.h"
 #include "arm_compute/graph/TensorDescriptor.h"
+#include "arm_compute/graph/Types.h"
 
 #include <memory>
 #include <set>
diff --git a/arm_compute/graph/TensorDescriptor.h b/arm_compute/graph/TensorDescriptor.h
index 5fa155e..46a6ab2 100644
--- a/arm_compute/graph/TensorDescriptor.h
+++ b/arm_compute/graph/TensorDescriptor.h
@@ -52,7 +52,11 @@
                      QuantizationInfo tensor_quant_info  = QuantizationInfo(),
                      DataLayout       tensor_data_layout = DataLayout::NCHW,
                      Target           tensor_target      = Target::UNSPECIFIED)
-        : shape(tensor_shape), data_type(tensor_data_type), layout(tensor_data_layout), quant_info(tensor_quant_info), target(tensor_target)
+        : shape(tensor_shape),
+          data_type(tensor_data_type),
+          layout(tensor_data_layout),
+          quant_info(tensor_quant_info),
+          target(tensor_target)
     {
     }
     /** Sets tensor descriptor shape
@@ -106,11 +110,11 @@
         return std::make_unique<TensorDescriptor>(*this);
     }
 
-    TensorShape      shape{};                        /**< Tensor shape */
-    DataType         data_type{ DataType::UNKNOWN }; /**< Data type */
-    DataLayout       layout{ DataLayout::NCHW };     /**< Data layout */
-    QuantizationInfo quant_info{};                   /**< Quantization info */
-    Target           target{ Target::UNSPECIFIED };  /**< Target */
+    TensorShape      shape{};                      /**< Tensor shape */
+    DataType         data_type{DataType::UNKNOWN}; /**< Data type */
+    DataLayout       layout{DataLayout::NCHW};     /**< Data layout */
+    QuantizationInfo quant_info{};                 /**< Quantization info */
+    Target           target{Target::UNSPECIFIED};  /**< Target */
 };
 } // namespace graph
 } // namespace arm_compute
diff --git a/arm_compute/graph/TypePrinter.h b/arm_compute/graph/TypePrinter.h
index 9df4eba..5e83820 100644
--- a/arm_compute/graph/TypePrinter.h
+++ b/arm_compute/graph/TypePrinter.h
@@ -37,7 +37,7 @@
 /** Formatted output of the Target. */
 inline ::std::ostream &operator<<(::std::ostream &os, const Target &target)
 {
-    switch(target)
+    switch (target)
     {
         case Target::UNSPECIFIED:
             os << "UNSPECIFIED";
@@ -60,7 +60,7 @@
 
 inline ::std::ostream &operator<<(::std::ostream &os, const NodeType &node_type)
 {
-    switch(node_type)
+    switch (node_type)
     {
         case NodeType::ActivationLayer:
             os << "ActivationLayer";
@@ -207,7 +207,7 @@
 /** Formatted output of the EltwiseOperation type. */
 inline ::std::ostream &operator<<(::std::ostream &os, const EltwiseOperation &eltwise_op)
 {
-    switch(eltwise_op)
+    switch (eltwise_op)
     {
         case EltwiseOperation::Add:
             os << "Add";
@@ -231,7 +231,7 @@
 /** Formatted output of the ConvolutionMethod type. */
 inline ::std::ostream &operator<<(::std::ostream &os, const ConvolutionMethod &method)
 {
-    switch(method)
+    switch (method)
     {
         case ConvolutionMethod::Default:
             os << "Default";
@@ -255,7 +255,7 @@
 /** Formatted output of the FastMathHint type. */
 inline ::std::ostream &operator<<(::std::ostream &os, const FastMathHint &hint)
 {
-    switch(hint)
+    switch (hint)
     {
         case FastMathHint::Enabled:
             os << "Enabled";
@@ -273,7 +273,7 @@
 /** Formatted output of the DepthwiseConvolutionMethod type. */
 inline ::std::ostream &operator<<(::std::ostream &os, const DepthwiseConvolutionMethod &method)
 {
-    switch(method)
+    switch (method)
     {
         case DepthwiseConvolutionMethod::Default:
             os << "DEFAULT";
diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index 8d49340..5541e3c 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -86,17 +86,18 @@
 /** Graph configuration structure */
 struct GraphConfig
 {
-    bool          use_function_memory_manager{ true };   /**< Use a memory manager to manage per-function auxilary memory */
-    bool          use_function_weights_manager{ true };  /**< Use a weights manager to manage transformed weights */
-    bool          use_transition_memory_manager{ true }; /**< Use a memory manager to manager transition buffer memory */
-    bool          use_tuner{ false };                    /**< Use a tuner in tunable backends */
-    bool          use_synthetic_type{ false };           /**< Convert graph to a synthetic graph for a data type */
-    DataType      synthetic_type{ DataType::QASYMM8 };   /**< The data type of the synthetic graph  */
-    CLTunerMode   tuner_mode{ CLTunerMode::EXHAUSTIVE }; /**< Tuner mode to be used by the CL tuner */
-    int           num_threads{ -1 };                     /**< Number of threads to use (thread capable backends), if 0 the backend will auto-initialize, if -1 the backend will stay as it is. */
-    std::string   tuner_file{ "acl_tuner.csv" };         /**< File to load/store tuning values from */
-    std::string   mlgo_file{ "heuristics.mlgo" };        /**< Filename to load MLGO heuristics from */
-    CLBackendType backend_type{ CLBackendType::Native }; /**< CL backend type to use */
+    bool        use_function_memory_manager{true};   /**< Use a memory manager to manage per-function auxilary memory */
+    bool        use_function_weights_manager{true};  /**< Use a weights manager to manage transformed weights */
+    bool        use_transition_memory_manager{true}; /**< Use a memory manager to manager transition buffer memory */
+    bool        use_tuner{false};                    /**< Use a tuner in tunable backends */
+    bool        use_synthetic_type{false};           /**< Convert graph to a synthetic graph for a data type */
+    DataType    synthetic_type{DataType::QASYMM8};   /**< The data type of the synthetic graph  */
+    CLTunerMode tuner_mode{CLTunerMode::EXHAUSTIVE}; /**< Tuner mode to be used by the CL tuner */
+    int         num_threads{
+        -1}; /**< Number of threads to use (thread capable backends), if 0 the backend will auto-initialize, if -1 the backend will stay as it is. */
+    std::string   tuner_file{"acl_tuner.csv"};         /**< File to load/store tuning values from */
+    std::string   mlgo_file{"heuristics.mlgo"};        /**< Filename to load MLGO heuristics from */
+    CLBackendType backend_type{CLBackendType::Native}; /**< CL backend type to use */
 };
 
 /**< Device target types */
diff --git a/arm_compute/graph/Utils.h b/arm_compute/graph/Utils.h
index a3d9012..9813ff0 100644
--- a/arm_compute/graph/Utils.h
+++ b/arm_compute/graph/Utils.h
@@ -36,7 +36,7 @@
 
 inline bool is_utility_node(INode *node)
 {
-    std::set<NodeType> utility_node_types = { NodeType::PrintLayer };
+    std::set<NodeType> utility_node_types = {NodeType::PrintLayer};
     return utility_node_types.find(node->type()) != utility_node_types.end();
 }
 
diff --git a/arm_compute/graph/Workload.h b/arm_compute/graph/Workload.h
index 5b4533c..8ff0a54 100644
--- a/arm_compute/graph/Workload.h
+++ b/arm_compute/graph/Workload.h
@@ -69,8 +69,7 @@
  */
 struct ExecutionTask
 {
-    ExecutionTask(std::unique_ptr<arm_compute::IFunction> &&f, INode *n)
-        : task(std::move(f)), node(n)
+    ExecutionTask(std::unique_ptr<arm_compute::IFunction> &&f, INode *n) : task(std::move(f)), node(n)
     {
     }
     /** Prevent instances of this class from being copied (As this class contains pointers) */
@@ -97,11 +96,11 @@
 /** Execution workload */
 struct ExecutionWorkload
 {
-    std::vector<Tensor *>      inputs  = {};          /**< Input handles */
-    std::vector<Tensor *>      outputs = {};          /**< Output handles */
-    std::vector<ExecutionTask> tasks   = {};          /**< Execution workload */
-    Graph                     *graph   = { nullptr }; /**< Graph bound to the workload */
-    GraphContext              *ctx     = { nullptr }; /**< Graph execution context */
+    std::vector<Tensor *>      inputs  = {};        /**< Input handles */
+    std::vector<Tensor *>      outputs = {};        /**< Output handles */
+    std::vector<ExecutionTask> tasks   = {};        /**< Execution workload */
+    Graph                     *graph   = {nullptr}; /**< Graph bound to the workload */
+    GraphContext              *ctx     = {nullptr}; /**< Graph execution context */
 };
 } // namespace graph
 } // namespace arm_compute
diff --git a/arm_compute/graph/backends/BackendRegistrar.h b/arm_compute/graph/backends/BackendRegistrar.h
index 902c12b..2879361 100644
--- a/arm_compute/graph/backends/BackendRegistrar.h
+++ b/arm_compute/graph/backends/BackendRegistrar.h
@@ -24,8 +24,8 @@
 #ifndef ARM_COMPUTE_GRAPH_BACKEND_REGISTRAR_H
 #define ARM_COMPUTE_GRAPH_BACKEND_REGISTRAR_H
 
-#include "arm_compute/graph/Types.h"
 #include "arm_compute/graph/backends/BackendRegistry.h"
+#include "arm_compute/graph/Types.h"
 
 #include <utility>
 
@@ -58,4 +58,4 @@
 } // namespace backends
 } // namespace graph
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_BACKEND_REGISTRAR_H */
\ No newline at end of file
+#endif /* ARM_COMPUTE_GRAPH_BACKEND_REGISTRAR_H */
diff --git a/arm_compute/graph/backends/CL/CLDeviceBackend.h b/arm_compute/graph/backends/CL/CLDeviceBackend.h
index 63674ad..09e19d7 100644
--- a/arm_compute/graph/backends/CL/CLDeviceBackend.h
+++ b/arm_compute/graph/backends/CL/CLDeviceBackend.h
@@ -25,7 +25,6 @@
 #define ARM_COMPUTE_GRAPH_CLDEVICEBACKEND_H
 
 #include "arm_compute/graph/IDeviceBackend.h"
-
 #include "arm_compute/runtime/CL/CLBufferAllocator.h"
 #include "arm_compute/runtime/CL/CLGEMMHeuristicsHandle.h"
 #include "arm_compute/runtime/CL/CLTuner.h"
@@ -59,22 +58,23 @@
     void set_kernel_tuning_mode(CLTunerMode tuning_mode);
 
     // Inherited overridden methods
-    void initialize_backend() override;
-    void setup_backend_context(GraphContext &ctx) override;
-    void release_backend_context(GraphContext &ctx) override;
+    void                           initialize_backend() override;
+    void                           setup_backend_context(GraphContext &ctx) override;
+    void                           release_backend_context(GraphContext &ctx) override;
     bool                           is_backend_supported() override;
     IAllocator                    *backend_allocator() override;
     std::unique_ptr<ITensorHandle> create_tensor(const Tensor &tensor) override;
-    std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) override;
-    std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
-    Status validate_node(INode &node) override;
-    std::shared_ptr<arm_compute::IMemoryManager> create_memory_manager(MemoryManagerAffinity affinity) override;
+    std::unique_ptr<ITensorHandle>
+    create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) override;
+    std::unique_ptr<arm_compute::IFunction>       configure_node(INode &node, GraphContext &ctx) override;
+    Status                                        validate_node(INode &node) override;
+    std::shared_ptr<arm_compute::IMemoryManager>  create_memory_manager(MemoryManagerAffinity affinity) override;
     std::shared_ptr<arm_compute::IWeightsManager> create_weights_manager() override;
     void                                          sync() override;
 
 private:
-    int                                _context_count;   /**< Counts how many contexts are currently using the backend */
-    CLTuner                            _tuner;           /**< CL kernel tuner */
+    int                                _context_count; /**< Counts how many contexts are currently using the backend */
+    CLTuner                            _tuner;         /**< CL kernel tuner */
     CLGEMMHeuristicsHandle             _gemm_heuristics; /**< GEMM heuristics */
     std::unique_ptr<CLBufferAllocator> _allocator;       /**< CL buffer affinity allocator */
     std::string                        _tuner_file;      /**< Filename to load/store the tuner's values from */
diff --git a/arm_compute/graph/backends/CL/CLSubTensorHandle.h b/arm_compute/graph/backends/CL/CLSubTensorHandle.h
index 3750fc8..85eebec 100644
--- a/arm_compute/graph/backends/CL/CLSubTensorHandle.h
+++ b/arm_compute/graph/backends/CL/CLSubTensorHandle.h
@@ -25,7 +25,6 @@
 #define ARM_COMPUTE_GRAPH_CLSUBTENSORHANDLE_H
 
 #include "arm_compute/graph/ITensorHandle.h"
-
 #include "arm_compute/runtime/CL/CLSubTensor.h"
 
 namespace arm_compute
@@ -45,7 +44,10 @@
      * @param[in] coords        Starting coordinates
      * @param[in] extend_parent Extends parent shape if true
      */
-    CLSubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords, bool extend_parent = false);
+    CLSubTensorHandle(ITensorHandle     *parent_handle,
+                      const TensorShape &shape,
+                      const Coordinates &coords,
+                      bool               extend_parent = false);
     /** Destructor: free the tensor's memory */
     ~CLSubTensorHandle() = default;
     /** Allow instances of this class to be move constructed */
@@ -58,10 +60,10 @@
     CLSubTensorHandle &operator=(const CLSubTensorHandle &) = delete;
 
     // Inherited overridden methods
-    void allocate() override;
-    void free() override;
-    void manage(IMemoryGroup *mg) override;
-    void map(bool blocking) override;
+    void                        allocate() override;
+    void                        free() override;
+    void                        manage(IMemoryGroup *mg) override;
+    void                        map(bool blocking) override;
     void                        unmap() override;
     void                        release_if_unused() override;
     arm_compute::ITensor       &tensor() override;
diff --git a/arm_compute/graph/backends/CL/CLTensorHandle.h b/arm_compute/graph/backends/CL/CLTensorHandle.h
index 16e30ef..57e9794 100644
--- a/arm_compute/graph/backends/CL/CLTensorHandle.h
+++ b/arm_compute/graph/backends/CL/CLTensorHandle.h
@@ -25,7 +25,6 @@
 #define ARM_COMPUTE_GRAPH_CLTENSORHANDLE_H
 
 #include "arm_compute/graph/ITensorHandle.h"
-
 #include "arm_compute/runtime/CL/CLTensor.h"
 
 namespace arm_compute
@@ -51,10 +50,10 @@
     CLTensorHandle &operator=(CLTensorHandle &&) = default;
 
     // Inherited overridden methods
-    void allocate() override;
-    void free() override;
-    void manage(IMemoryGroup *mg) override;
-    void map(bool blocking) override;
+    void                        allocate() override;
+    void                        free() override;
+    void                        manage(IMemoryGroup *mg) override;
+    void                        map(bool blocking) override;
     void                        unmap() override;
     void                        release_if_unused() override;
     arm_compute::ITensor       &tensor() override;
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index 877e1f9..fd8b6b5 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -24,19 +24,19 @@
 #ifndef ACL_ARM_COMPUTE_GRAPH_BACKENDS_FUNCTIONHELPERS_H
 #define ACL_ARM_COMPUTE_GRAPH_BACKENDS_FUNCTIONHELPERS_H
 
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensorInfo.h"
+#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
+#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
+#include "arm_compute/graph/backends/Utils.h"
 #include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/nodes/Nodes.h"
 #include "arm_compute/graph/Tensor.h"
 #include "arm_compute/graph/TypePrinter.h"
 #include "arm_compute/graph/Types.h"
 #include "arm_compute/graph/Utils.h"
-#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
-#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
-#include "arm_compute/graph/backends/Utils.h"
-#include "arm_compute/graph/nodes/Nodes.h"
 
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensorInfo.h"
 #include "support/Cast.h"
 
 namespace arm_compute
@@ -59,13 +59,16 @@
 typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
 {
     typename TargetInfo::TensorType *backing_tensor = nullptr;
-    if(tensor != nullptr)
+    if (tensor != nullptr)
     {
         ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
         // Get backing tensor handle
         ITensorHandle *tensor_handle = tensor->handle();
         // Get backing tensor
-        backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
+        backing_tensor = (tensor_handle != nullptr)
+                             ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(
+                                   &tensor_handle->tensor())
+                             : nullptr;
     }
 
     return backing_tensor;
@@ -74,11 +77,8 @@
 template <typename TargetInfo>
 void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
-                                  << " Target: " << TargetInfo::TargetType
-                                  << " ID: " << node.id()
-                                  << node.name()
-                                  << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type() << " Target: " << TargetInfo::TargetType
+                                              << " ID: " << node.id() << node.name() << std::endl);
 
     ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
     ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
@@ -109,17 +109,11 @@
     auto func = std::make_unique<ActivationLayerFunction>();
     func->configure(input, output, act_info);
 
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Shape: " << input->info()->tensor_shape()
-                               << " Activation function: " << act_info.activation()
-                               << " a: " << act_info.a()
-                               << " b: " << act_info.b()
-                               << " InPlace : " << is_in_place_operation(input, output)
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO(
+        "Instantiated " << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+                        << " Data Type: " << input->info()->data_type() << " Shape: " << input->info()->tensor_shape()
+                        << " Activation function: " << act_info.activation() << " a: " << act_info.a() << " b: "
+                        << act_info.b() << " InPlace : " << is_in_place_operation(input, output) << std::endl);
 
     return func;
 }
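Note: the multi-line logging macros show the biggest visual change in this file. Instead of one << operand per line, operands are packed up to the column limit and continuation lines are aligned under the first streamed operand. The same reflow on a plain stream (a sketch; names are hypothetical, the macro itself is unchanged):

    #include <iostream>
    #include <string>

    void log_instantiated(const std::string &name, const std::string &type, int id)
    {
        // Operands fill the line to the limit; the continuation aligns under
        // the first operand after '<<', exactly as in the macros above.
        std::cout << "Instantiated " << name << " Type: " << type << " ID: " << id << " (formatted at 120 columns)"
                  << std::endl;
    }
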
@@ -148,15 +142,10 @@
     auto func = std::make_unique<ArgMinMaxLayerFunction>();
     func->configure(input, axis, output, op);
 
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Shape: " << input->info()->tensor_shape()
-                               << " Reduction Operation: " << op
-                               << " axis: " << axis
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Shape: " << input->info()->tensor_shape()
+                                               << " Reduction Operation: " << op << " axis: " << axis << std::endl);
 
     return func;
 }
@@ -191,16 +180,11 @@
     func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Shape: " << input->info()->tensor_shape()
-                               << " Epsilon: " << epsilon << " "
-                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
-                               << " InPlace: " << is_in_place_operation(input, output)
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Shape: " << input->info()->tensor_shape() << " Epsilon: " << epsilon
+                                               << " " << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
+                                               << " InPlace: " << is_in_place_operation(input, output) << std::endl);
 
     return func;
 }
@@ -216,7 +200,8 @@
  * @return Backend batch normalization layer function
  */
 template <typename FusedLayerTypes, typename TargetInfo>
-std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
+std::unique_ptr<IFunction>
+create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
 {
     validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
 
@@ -246,19 +231,16 @@
 
     // Create and configure function
     std::tie(func, func_name) = create_named_memory_managed_function<FType>(
-                                    std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
+        std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta,
+        gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
 
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+                               << " Data Type: " << input->info()->data_type() << " Input shape: "
+                               << input->info()->tensor_shape() << " Weights shape: " << weights->info()->tensor_shape()
                                << " Output shape: " << output->info()->tensor_shape()
-                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
-                               << std::endl);
+                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "") << std::endl);
     return func;
 }
 
@@ -273,7 +255,9 @@
  * @return Backend fused depthwise convolution batch normalization layer function
  */
 template <typename FusedLayerTypes, typename TargetInfo>
-std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
+std::unique_ptr<IFunction>
+create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node,
+                                                             GraphContext                                    &ctx)
 {
     validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
 
@@ -302,19 +286,16 @@
 
     // Create and configure function
     std::tie(func, func_name) = create_named_memory_managed_function<FType>(
-                                    std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
+        std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var,
+        beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
 
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+                               << " Data Type: " << input->info()->data_type() << " Input shape: "
+                               << input->info()->tensor_shape() << " Weights shape: " << weights->info()->tensor_shape()
                                << " Output shape: " << output->info()->tensor_shape()
-                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
-                               << std::endl);
+                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "") << std::endl);
     return func;
 }
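Note: when the return type, a long function name, and the parameters cannot all fit in 120 columns, the formatter now prefers breaking after the return type over splitting the parameter list further, as in the two create_fused_* signatures above. This is penalty-driven behaviour (the concrete penalty values in the configuration are unknown); a sketch:

    #include <memory>
    #include <string>

    // The full declaration would exceed the column limit, so the return type
    // stays whole on its own line and the name starts the next one.
    std::unique_ptr<std::string>
    create_example_function_with_a_deliberately_long_descriptive_name(const std::string &input, bool fast_math);
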
 
@@ -343,15 +324,11 @@
     func->configure(input, output, deltas, bbox_info);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Shape: " << input->info()->tensor_shape()
-                               << " BoundingBox Info img W: " << bbox_info.img_width() << " "
-                               << " BoundingBox Info img H: " << bbox_info.img_height() << " "
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO(
+        "Instantiated " << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+                        << " Data Type: " << input->info()->data_type() << " Shape: " << input->info()->tensor_shape()
+                        << " BoundingBox Info img W: " << bbox_info.img_width() << " "
+                        << " BoundingBox Info img H: " << bbox_info.img_height() << " " << std::endl);
 
     return std::move(func);
 }
@@ -379,14 +356,10 @@
     auto func = std::make_unique<ChannelShuffleLayerFunction>();
     func->configure(input, output, num_groups);
 
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Shape: " << input->info()->tensor_shape()
-                               << " Num groups: " << num_groups
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Shape: " << input->info()->tensor_shape()
+                                               << " Num groups: " << num_groups << std::endl);
 
     return func;
 }
@@ -403,24 +376,25 @@
 template <typename ConcatenateLayerFunction, typename TargetInfo>
 std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name()
+                                                                         << std::endl);
     ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
 
     // Return nullptr if depth concatenate is switched off
-    if(!node.is_enabled())
+    if (!node.is_enabled())
     {
         return nullptr;
     }
 
     // Extract IO and info
     std::vector<typename TargetInfo::SrcTensorType *> inputs;
-    for(unsigned int i = 0; i < node.num_inputs(); ++i)
+    for (unsigned int i = 0; i < node.num_inputs(); ++i)
     {
         inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
     }
-    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
-    const DataLayout                 data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
-    const size_t                     concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
+    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
+    const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
+    const size_t     concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
 
     // Create and configure function
     auto func = std::make_unique<ConcatenateLayerFunction>();
@@ -429,20 +403,14 @@
     // Log info
     const bool         is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
     std::ostringstream qss;
-    if(is_quantized)
+    if (is_quantized)
     {
         qss << " Output QuantInfo: " << output->info()->quantization_info();
     }
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << output->info()->data_type()
-                               << " Shape: " << output->info()->tensor_shape()
-                               << " Num Inputs: " << inputs.size()
-                               << " Axis: " << concat_axis
-                               << qss.str()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO(
+        "Instantiated " << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+                        << " Data Type: " << output->info()->data_type() << " Shape: " << output->info()->tensor_shape()
+                        << " Num Inputs: " << inputs.size() << " Axis: " << concat_axis << qss.str() << std::endl);
 
     return func;
 }
@@ -470,7 +438,7 @@
 
     const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
 
-    if(is_quantized)
+    if (is_quantized)
     {
         biases->info()->set_data_type(DataType::S32);
     }
@@ -486,55 +454,50 @@
     std::unique_ptr<IFunction>      func;
     std::string                     func_name;
 
-    if(conv_algorithm == ConvolutionMethod::Winograd)
+    if (conv_algorithm == ConvolutionMethod::Winograd)
     {
         ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
-        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
-                                        std::string("WinogradConvolutionLayer"), mm,
-                                        input, weights, biases, output, conv_info, fused_act, fast_math);
+        std::tie(func, func_name) =
+            create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
+                std::string("WinogradConvolutionLayer"), mm, input, weights, biases, output, conv_info, fused_act,
+                fast_math);
     }
-    else if(conv_algorithm == ConvolutionMethod::Direct)
+    else if (conv_algorithm == ConvolutionMethod::Direct)
     {
         ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
         std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
-                                        std::string("DirectConvolutionLayer"),
-                                        input, weights, biases, output, conv_info, fused_act);
+            std::string("DirectConvolutionLayer"), input, weights, biases, output, conv_info, fused_act);
     }
-    else if(conv_algorithm == ConvolutionMethod::GEMM)
+    else if (conv_algorithm == ConvolutionMethod::GEMM)
     {
-        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
-                                        std::string("GEMMConvolutionLayer"), mm,
-                                        input, weights, biases, output, conv_info,
-                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
+        std::tie(func, func_name) =
+            create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
+                std::string("GEMMConvolutionLayer"), mm, input, weights, biases, output, conv_info, WeightsInfo(),
+                Size2D(1U, 1U), fused_act, num_groups);
     }
     else
     {
-        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
-                                        std::string("GenericConvolutionLayer"), mm,
-                                        input, weights, biases, output, conv_info,
-                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
+        std::tie(func, func_name) =
+            create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
+                std::string("GenericConvolutionLayer"), mm, input, weights, biases, output, conv_info, WeightsInfo(),
+                Size2D(1U, 1U), fused_act, fast_math, num_groups);
     }
 
     // Log info
     std::ostringstream qss;
-    if(is_quantized)
+    if (is_quantized)
     {
         qss << " Input QuantInfo: " << input->info()->quantization_info()
             << " Weights QuantInfo: " << weights->info()->quantization_info()
             << " Output QuantInfo: " << output->info()->quantization_info();
     }
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << func_name
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Groups: " << num_groups
+                               << node.name() << " Type: " << func_name << " Target: " << TargetInfo::TargetType
+                               << " Data Type: " << input->info()->data_type() << " Groups: " << num_groups
                                << " Input shape: " << input->info()->tensor_shape()
                                << " Weights shape: " << weights->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << qss.str()
-                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
-                               << std::endl);
+                               << " Output shape: " << output->info()->tensor_shape() << qss.str()
+                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "") << std::endl);
     return func;
 }
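Note: wrapped call arguments in this hunk follow two cases. If at least one argument fits after the opening parenthesis, the continuation aligns under the first argument; otherwise the break happens straight after '(' and the whole bin-packed list is indented by the continuation indent (+4, inferred). A self-contained sketch of the second case, using a hypothetical helper in place of create_named_memory_managed_function:

    #include <string>
    #include <tuple>
    #include <utility>

    // Hypothetical stand-in for the library's factory helpers.
    std::pair<int, std::string> create_named(const std::string &name, int a, int b, int c, float d, bool e)
    {
        return {a + b + c, name};
    }

    void configure()
    {
        int         func = 0;
        std::string func_name;
        // No argument fits on the first line, so the list starts on the next
        // line at +4 and is packed to the column limit.
        std::tie(func, func_name) = create_named(
            std::string("GenericConvolutionLayerWithAVeryLongDescriptiveName"), 1, 2, 3, 0.5f, true);
    }
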
 
@@ -566,19 +529,14 @@
     std::unique_ptr<IFunction>      func;
 
     std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
-                                      std::string(), mm,
-                                      input, weights, biases, output, deconv_info);
+        std::string(), mm, input, weights, biases, output, deconv_info);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Weights shape: " << weights->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Weights shape: " << weights->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
     return func;
 }
 
@@ -604,7 +562,7 @@
 
     const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
 
-    if(is_quantized)
+    if (is_quantized)
     {
         biases->info()->set_data_type(DataType::S32);
     }
@@ -617,30 +575,25 @@
     std::unique_ptr<IFunction> func;
     std::string                func_name;
 
-    std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
-                                    std::string("DepthwiseConvolutionLayer"),
-                                    input, weights, biases, output, conv_info, depth_multiplier, fused_act);
+    std::tie(func, func_name) =
+        create_named_function<DepthwiseConvolutionLayer>(std::string("DepthwiseConvolutionLayer"), input, weights,
+                                                         biases, output, conv_info, depth_multiplier, fused_act);
 
     // Log info
     std::ostringstream qss;
-    if(is_quantized)
+    if (is_quantized)
     {
         qss << " Input QuantInfo: " << input->info()->quantization_info()
             << " Weights QuantInfo: " << weights->info()->quantization_info()
             << " Output QuantInfo: " << output->info()->quantization_info();
     }
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << func_name
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << node.name() << " Type: " << func_name << " Target: " << TargetInfo::TargetType
+                               << " Data Type: " << input->info()->data_type() << " Input shape: "
+                               << input->info()->tensor_shape() << " Weights shape: " << weights->info()->tensor_shape()
                                << " Output shape: " << output->info()->tensor_shape()
-                               << " Depth multiplier: " << depth_multiplier
-                               << qss.str()
-                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
-                               << std::endl);
+                               << " Depth multiplier: " << depth_multiplier << qss.str()
+                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "") << std::endl);
     return func;
 }
 
@@ -670,15 +623,11 @@
     func->configure(input, output, node.block_shape());
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Block Size: " << node.block_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Block Size: " << node.block_shape()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -709,15 +658,11 @@
     func->configure(input, output);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Input quantization info: " << output->info()->quantization_info()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Input quantization info: " << output->info()->quantization_info()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -753,16 +698,12 @@
 
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input0->info()->data_type()
-                               << " Input0 shape: " << input0->info()->tensor_shape()
-                               << " Input1 shape: " << input1->info()->tensor_shape()
+                               << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+                               << " Data Type: " << input0->info()->data_type() << " Input0 shape: "
+                               << input0->info()->tensor_shape() << " Input1 shape: " << input1->info()->tensor_shape()
                                << " Input2 shape: " << input2->info()->tensor_shape()
                                << " Output shape: " << output->info()->tensor_shape()
-                               << " DetectionOutputLayer info: " << detect_info
-                               << std::endl);
+                               << " DetectionOutputLayer info: " << detect_info << std::endl);
 
     return func;
 }
@@ -805,19 +746,15 @@
 
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input0->info()->data_type()
-                               << " Input0 shape: " << input0->info()->tensor_shape()
-                               << " Input1 shape: " << input1->info()->tensor_shape()
+                               << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+                               << " Data Type: " << input0->info()->data_type() << " Input0 shape: "
+                               << input0->info()->tensor_shape() << " Input1 shape: " << input1->info()->tensor_shape()
                                << " Input2 shape: " << input2->info()->tensor_shape()
                                << " Output0 shape: " << output0->info()->tensor_shape()
                                << " Output1 shape: " << output1->info()->tensor_shape()
                                << " Output2 shape: " << output2->info()->tensor_shape()
                                << " Output3 shape: " << output3->info()->tensor_shape()
-                               << " DetectionPostProcessLayer info: " << detect_info
-                               << std::endl);
+                               << " DetectionPostProcessLayer info: " << detect_info << std::endl);
 
     return func;
 }
@@ -849,35 +786,31 @@
 
     std::unique_ptr<IFunction> func = nullptr;
     std::string                func_name;
-    if(eltwise_op == EltwiseOperation::Add)
+    if (eltwise_op == EltwiseOperation::Add)
     {
         std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
-                                        std::string("ArithmeticAddition"),
-                                        input1, input2, output, convert_policy, act_info);
+            std::string("ArithmeticAddition"), input1, input2, output, convert_policy, act_info);
     }
-    else if(eltwise_op == EltwiseOperation::Sub)
+    else if (eltwise_op == EltwiseOperation::Sub)
     {
         std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
-                                        std::string("ArithmeticSubtraction"),
-                                        input1, input2, output, convert_policy, act_info);
+            std::string("ArithmeticSubtraction"), input1, input2, output, convert_policy, act_info);
     }
-    else if(eltwise_op == EltwiseOperation::Mul)
+    else if (eltwise_op == EltwiseOperation::Mul)
     {
         std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
-                                        std::string("PixelWiseMultiplication"),
-                                        input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
+            std::string("PixelWiseMultiplication"), input1, input2, output, 1.f, convert_policy, node.rounding_policy(),
+            act_info);
     }
-    else if(eltwise_op == EltwiseOperation::Max)
+    else if (eltwise_op == EltwiseOperation::Max)
     {
         std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
-                                        std::string("ElementwiseMaximum"),
-                                        input1, input2, output, act_info);
+            std::string("ElementwiseMaximum"), input1, input2, output, act_info);
     }
-    else if(eltwise_op == EltwiseOperation::Div)
+    else if (eltwise_op == EltwiseOperation::Div)
     {
         std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Division>(
-                                        std::string("ArithmeticDivision"),
-                                        input1, input2, output, act_info);
+            std::string("ArithmeticDivision"), input1, input2, output, act_info);
     }
     else
     {
@@ -885,14 +818,10 @@
     }
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Operation: " << func_name
-                               << " Data Type: " << input1->info()->data_type()
-                               << " Shape: " << input1->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type()
+                                               << " Target: " << TargetInfo::TargetType << " Operation: " << func_name
+                                               << " Data Type: " << input1->info()->data_type()
+                                               << " Shape: " << input1->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -921,11 +850,10 @@
 
     std::unique_ptr<IFunction> func = nullptr;
     std::string                func_name;
-    if(eltwise_op == UnaryEltwiseOperation::Exp)
+    if (eltwise_op == UnaryEltwiseOperation::Exp)
     {
-        std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
-                                        std::string("Exp"),
-                                        input, output);
+        std::tie(func, func_name) =
+            create_named_function<typename UnaryEltwiseFunctions::Exp>(std::string("Exp"), input, output);
     }
     else
     {
@@ -933,14 +861,10 @@
     }
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Operation: " << func_name
-                               << " Data Type: " << input->info()->data_type()
-                               << " Shape: " << input->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type()
+                                               << " Target: " << TargetInfo::TargetType << " Operation: " << func_name
+                                               << " Data Type: " << input->info()->data_type()
+                                               << " Shape: " << input->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -971,14 +895,10 @@
     func->configure(input, output);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -1020,22 +940,17 @@
 
     // Log info
     std::ostringstream qss;
-    if(is_quantized)
+    if (is_quantized)
     {
         qss << " Input QuantInfo: " << input->info()->quantization_info()
             << " Weights QuantInfo: " << weights->info()->quantization_info()
             << " Output QuantInfo: " << output->info()->quantization_info();
     }
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << qss.str()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Weights shape: " << weights->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << qss.str() << " Input shape: " << input->info()->tensor_shape()
+                                               << " Weights shape: " << weights->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -1075,16 +990,14 @@
     func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
-                               << " Target " << TargetInfo::TargetType
-                               << " Data Type: " << scores->info()->data_type()
-                               << " Scores shape: " << scores->info()->tensor_shape()
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+                               << node.type() << " Target " << TargetInfo::TargetType << " Data Type: "
+                               << scores->info()->data_type() << " Scores shape: " << scores->info()->tensor_shape()
                                << " Deltas shape: " << deltas->info()->tensor_shape()
                                << " Anchors shape: " << anchors->info()->tensor_shape()
                                << " Proposals shape: " << proposals->info()->tensor_shape()
                                << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
-                               << " Scores Out shape: " << scores_out->info()->tensor_shape()
-                               << std::endl);
+                               << " Scores Out shape: " << scores_out->info()->tensor_shape() << std::endl);
 
     return std::move(func);
 }
@@ -1119,16 +1032,11 @@
     func->configure(input, output, axis, epsilon);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << " Axis: " << axis
-                               << " Epsilon: " << epsilon
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape()
+                                               << " Axis: " << axis << " Epsilon: " << epsilon << std::endl);
 
     return func;
 }
@@ -1162,15 +1070,11 @@
     func->configure(input, output, norm_info);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << " Normalization info: " << norm_info.type()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape()
+                                               << " Normalization info: " << norm_info.type() << std::endl);
 
     return std::move(func);
 }
@@ -1204,13 +1108,9 @@
     func->configure(input, output, mean, std);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Shape: " << input->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Shape: " << input->info()->tensor_shape() << std::endl);
 
     return std::move(func);
 }
@@ -1242,14 +1142,10 @@
     func->configure(input, output, padding, pad_value);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -1280,15 +1176,11 @@
     func->configure(input, output, perm);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << " Permutation vector: " << perm
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape()
+                                               << " Permutation vector: " << perm << std::endl);
 
     return func;
 }
@@ -1319,15 +1211,11 @@
     func->configure(input, output, pool_info);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << " Pooling info: " << pool_info.pool_type
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape()
+                                               << " Pooling info: " << pool_info.pool_type << std::endl);
 
     return func;
 }
@@ -1358,14 +1246,10 @@
     func->configure(input, alpha, output);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -1388,13 +1272,9 @@
     ARM_COMPUTE_UNUSED(input);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape() << std::endl);
 
     return nullptr;
 }
@@ -1428,15 +1308,11 @@
 
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input0->info()->data_type()
-                               << " Input0 shape: " << input0->info()->tensor_shape()
-                               << " Input1 shape: " << input1->info()->tensor_shape()
+                               << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
+                               << " Data Type: " << input0->info()->data_type() << " Input0 shape: "
+                               << input0->info()->tensor_shape() << " Input1 shape: " << input1->info()->tensor_shape()
                                << " Output shape: " << output->info()->tensor_shape()
-                               << " PriorBoxLayer info: " << prior_info
-                               << std::endl);
+                               << " PriorBoxLayer info: " << prior_info << std::endl);
 
     return func;
 }
@@ -1466,14 +1342,10 @@
     func->configure(input, output);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -1508,16 +1380,11 @@
 
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
+                               << node.name() << " Type: " << node.type() << " Target: " << TargetInfo::TargetType
                                << " Data Type: " << input->info()->data_type()
                                << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << " Operation: " << op
-                               << " Axis: " << axis
-                               << " Keep dimensions:" << keep_dims
-                               << std::endl);
+                               << " Output shape: " << output->info()->tensor_shape() << " Operation: " << op
+                               << " Axis: " << axis << " Keep dimensions:" << keep_dims << std::endl);
 
     return func;
 }
@@ -1547,14 +1414,10 @@
     func->configure(input, output, node.stride());
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -1584,14 +1447,10 @@
     func->configure(input, output);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -1619,18 +1478,15 @@
 
     // Create and configure function
     auto func = std::make_unique<ResizeLayerFunction>();
-    func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false });
+    func->configure(input, output,
+                    ScaleKernelInfo{policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false});
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << " Interpolation: " << policy
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape()
+                                               << " Interpolation: " << policy << std::endl);
 
     return func;
 }
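Note: the ScaleKernelInfo argument above also picks up the braced-init-list rule: the padding just inside '{ ... }' is dropped, consistent with Cpp11BracedListStyle (again an assumption, as the configuration is not delivered). A sketch with a hypothetical aggregate:

    struct ScaleParams
    {
        int   width;
        int   height;
        float ratio;
    };

    // Before: ScaleParams params{ 640, 480, 1.f };
    // After:  no spaces just inside the braces.
    ScaleParams params{640, 480, 1.f};
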
@@ -1665,17 +1521,13 @@
     func->configure(input, rois, output, pool_info);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << " ROIs shape: " << rois->info()->tensor_shape()
-                               << " ROIPooling width: " << pool_info.pooled_width()
-                               << " ROIPooling height: " << pool_info.pooled_height()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape()
+                                               << " ROIs shape: " << rois->info()->tensor_shape()
+                                               << " ROIPooling width: " << pool_info.pooled_width()
+                                               << " ROIPooling height: " << pool_info.pooled_height() << std::endl);
 
     return std::move(func);
 }
@@ -1705,14 +1557,10 @@
     func->configure(input, output, node.starts(), node.ends());
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -1744,14 +1592,10 @@
     func->configure(input, output, beta);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
 
     return func;
 }
@@ -1768,12 +1612,13 @@
 template <typename StackLayerFunction, typename TargetInfo>
 std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name()
+                                                                   << std::endl);
     ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
 
     // Extract IO and info
     std::vector<typename TargetInfo::TensorType *> inputs;
-    for(unsigned int i = 0; i < node.num_inputs(); ++i)
+    for (unsigned int i = 0; i < node.num_inputs(); ++i)
     {
         inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
     }
@@ -1785,16 +1630,12 @@
     func->configure(inputs, axis, output);
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << output->info()->data_type()
-                               << " Inputs shape: " << inputs[0]->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << " Num Inputs: " << inputs.size()
-                               << " Axis: " << axis
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type()
+                                               << " Target: " << TargetInfo::TargetType
+                                               << " Data Type: " << output->info()->data_type()
+                                               << " Inputs shape: " << inputs[0]->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape()
+                                               << " Num Inputs: " << inputs.size() << " Axis: " << axis << std::endl);
 
     return func;
 }
@@ -1829,14 +1670,10 @@
     func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
 
     // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << node.type()
-                               << " Target: " << TargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.name() << " Type: " << node.type() << " Target: "
+                                               << TargetInfo::TargetType << " Data Type: " << input->info()->data_type()
+                                               << " Input shape: " << input->info()->tensor_shape()
+                                               << " Output shape: " << output->info()->tensor_shape() << std::endl);
 
     return func;
 }
diff --git a/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h b/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
index 19c627d..27e21cb 100644
--- a/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
+++ b/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
@@ -70,15 +70,19 @@
      * @param[in]  fused_act  Activation layer information in case of a fused activation.
      *
      */
-    void configure(TensorType       *input,
-                   TensorType       *weights,
-                   TensorType       *bias,
-                   TensorType       *output,
-                   const TensorType *mean,
-                   const TensorType *var,
-                   const TensorType *beta,
-                   const TensorType *gamma,
-                   float epsilon, const PadStrideInfo &conv_info, unsigned int num_groups, bool fast_math, ActivationLayerInfo const &fused_act)
+    void configure(TensorType                *input,
+                   TensorType                *weights,
+                   TensorType                *bias,
+                   TensorType                *output,
+                   const TensorType          *mean,
+                   const TensorType          *var,
+                   const TensorType          *beta,
+                   const TensorType          *gamma,
+                   float                      epsilon,
+                   const PadStrideInfo       &conv_info,
+                   unsigned int               num_groups,
+                   bool                       fast_math,
+                   ActivationLayerInfo const &fused_act)
     {
         // We don't run any validation, as we assume that the layers have already been validated
         const bool        has_bias = (bias != nullptr);
@@ -86,7 +90,7 @@
 
         // We check if the layer has a bias. If yes, use it in-place. If not, we need to create one
         // as batch normalization might end up with a bias != 0
-        if(has_bias)
+        if (has_bias)
         {
             _fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon);
             bias_to_use = bias;
@@ -97,9 +101,10 @@
             bias_to_use = &_fused_bias;
         }
 
-        _conv_layer.configure(input, weights, bias_to_use, output, conv_info, WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
+        _conv_layer.configure(input, weights, bias_to_use, output, conv_info, WeightsInfo(), Size2D(1U, 1U), fused_act,
+                              fast_math, num_groups);
 
-        if(!has_bias)
+        if (!has_bias)
         {
             _fused_bias.allocator()->allocate();
         }
@@ -114,7 +119,7 @@
 
     void prepare()
     {
-        if(!_is_prepared)
+        if (!_is_prepared)
         {
             _fused_batch_norm_layer.run();
             _is_prepared = true;
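
(For reference, not part of the change.) The fusion performed by _fused_batch_norm_layer folds the batch-normalization statistics into the convolution weights and bias, so that after prepare() only the convolution runs. A minimal sketch of the standard folding, where \mu and \sigma^2 are the stored mean and variance, \gamma and \beta the learned scale and shift, and \varepsilon the epsilon argument of configure():

    w_{fused} = w \cdot \frac{\gamma}{\sqrt{\sigma^2 + \varepsilon}}, \qquad
    b_{fused} = (b - \mu) \cdot \frac{\gamma}{\sqrt{\sigma^2 + \varepsilon}} + \beta

Even for b = 0 the fused bias is generally non-zero, which is why the code above allocates _fused_bias when the layer has no bias of its own.
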
diff --git a/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h b/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
index 4f8a8da..07a2cdd 100644
--- a/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
+++ b/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
@@ -67,15 +67,18 @@
      * @param[in]  fused_act        Activation layer information in case of a fused activation.
      *
      */
-    void configure(TensorType       *input,
-                   TensorType       *weights,
-                   TensorType       *bias,
-                   TensorType       *output,
-                   const TensorType *mean,
-                   const TensorType *var,
-                   const TensorType *beta,
-                   const TensorType *gamma,
-                   float epsilon, const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo const &fused_act)
+    void configure(TensorType                *input,
+                   TensorType                *weights,
+                   TensorType                *bias,
+                   TensorType                *output,
+                   const TensorType          *mean,
+                   const TensorType          *var,
+                   const TensorType          *beta,
+                   const TensorType          *gamma,
+                   float                      epsilon,
+                   const PadStrideInfo       &conv_info,
+                   unsigned int               depth_multiplier,
+                   ActivationLayerInfo const &fused_act)
     {
         // We don't run any validation, as we assume that the layers have already been validated
         const bool        has_bias = (bias != nullptr);
@@ -83,20 +86,23 @@
 
         // We check if the layer has a bias. If yes, use it in-place. If not, we need to create one
         // as batch normalization might end up with a bias != 0
-        if(has_bias)
+        if (has_bias)
         {
-            _fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon, FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
+            _fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon,
+                                              FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
             bias_to_use = bias;
         }
         else
         {
-            _fused_batch_norm_layer.configure(weights, mean, var, nullptr, &_fused_bias, nullptr, beta, gamma, epsilon, FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
+            _fused_batch_norm_layer.configure(weights, mean, var, nullptr, &_fused_bias, nullptr, beta, gamma, epsilon,
+                                              FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
             bias_to_use = &_fused_bias;
         }
 
-        _depth_conv_layer.configure(input, weights, bias_to_use, output, conv_info, depth_multiplier, fused_act.enabled() ? fused_act : ActivationLayerInfo());
+        _depth_conv_layer.configure(input, weights, bias_to_use, output, conv_info, depth_multiplier,
+                                    fused_act.enabled() ? fused_act : ActivationLayerInfo());
 
-        if(!has_bias)
+        if (!has_bias)
         {
             _fused_bias.allocator()->allocate();
         }
@@ -111,7 +117,7 @@
 
     void prepare()
     {
-        if(!_is_prepared)
+        if (!_is_prepared)
         {
             _fused_batch_norm_layer.run();
             _is_prepared = true;
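
Both fused functions defer the expensive part: configure() only wires the sub-layers, and the folding itself runs exactly once through prepare(), guarded by _is_prepared. A hypothetical call sequence (func and every tensor pointer are placeholders, assuming the usual IFunction lifecycle):

    // Hypothetical usage sketch -- none of these names come from this diff;
    // all tensors are assumed to be allocated backend tensors.
    func.configure(input, weights, bias, output, mean, var, beta, gamma,
                   0.001f /* epsilon */, conv_info, 1 /* depth_multiplier */,
                   ActivationLayerInfo() /* no fused activation */);
    func.prepare(); // folds batch norm into the weights/bias exactly once
    func.run();     // afterwards only the depthwise convolution executes
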
diff --git a/arm_compute/graph/backends/NEON/NEDeviceBackend.h b/arm_compute/graph/backends/NEON/NEDeviceBackend.h
index 9cb37d4..cd817a2 100644
--- a/arm_compute/graph/backends/NEON/NEDeviceBackend.h
+++ b/arm_compute/graph/backends/NEON/NEDeviceBackend.h
@@ -25,7 +25,6 @@
 #define ARM_COMPUTE_GRAPH_NEDEVICEBACKEND_H
 
 #include "arm_compute/graph/IDeviceBackend.h"
-
 #include "arm_compute/runtime/Allocator.h"
 
 namespace arm_compute
@@ -41,16 +40,17 @@
     NEDeviceBackend();
 
     // Inherited overridden methods
-    void initialize_backend() override;
-    void setup_backend_context(GraphContext &ctx) override;
-    void release_backend_context(GraphContext &ctx) override;
+    void                           initialize_backend() override;
+    void                           setup_backend_context(GraphContext &ctx) override;
+    void                           release_backend_context(GraphContext &ctx) override;
     bool                           is_backend_supported() override;
     IAllocator                    *backend_allocator() override;
     std::unique_ptr<ITensorHandle> create_tensor(const Tensor &tensor) override;
-    std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) override;
-    std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
-    Status validate_node(INode &node) override;
-    std::shared_ptr<arm_compute::IMemoryManager> create_memory_manager(MemoryManagerAffinity affinity) override;
+    std::unique_ptr<ITensorHandle>
+    create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) override;
+    std::unique_ptr<arm_compute::IFunction>       configure_node(INode &node, GraphContext &ctx) override;
+    Status                                        validate_node(INode &node) override;
+    std::shared_ptr<arm_compute::IMemoryManager>  create_memory_manager(MemoryManagerAffinity affinity) override;
     std::shared_ptr<arm_compute::IWeightsManager> create_weights_manager() override;
     void                                          sync() override;
 
diff --git a/arm_compute/graph/backends/NEON/NESubTensorHandle.h b/arm_compute/graph/backends/NEON/NESubTensorHandle.h
index a438b65..3619f4e 100644
--- a/arm_compute/graph/backends/NEON/NESubTensorHandle.h
+++ b/arm_compute/graph/backends/NEON/NESubTensorHandle.h
@@ -25,7 +25,6 @@
 #define ARM_COMPUTE_GRAPH_NESUBTENSORHANDLE_H
 
 #include "arm_compute/graph/ITensorHandle.h"
-
 #include "arm_compute/runtime/SubTensor.h"
 
 namespace arm_compute
@@ -45,7 +44,10 @@
      * @param[in] coords        Starting coordinates
      * @param[in] extend_parent Extends parent shape if true
      */
-    NESubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords, bool extend_parent = false);
+    NESubTensorHandle(ITensorHandle     *parent_handle,
+                      const TensorShape &shape,
+                      const Coordinates &coords,
+                      bool               extend_parent = false);
     /** Destructor: free the tensor's memory */
     ~NESubTensorHandle() = default;
     /** Allow instances of this class to be move constructed */
@@ -58,10 +60,10 @@
     NESubTensorHandle &operator=(const NESubTensorHandle &) = delete;
 
     // Inherited overridden methods
-    void allocate() override;
-    void free() override;
-    void manage(IMemoryGroup *mg) override;
-    void map(bool blocking) override;
+    void                        allocate() override;
+    void                        free() override;
+    void                        manage(IMemoryGroup *mg) override;
+    void                        map(bool blocking) override;
     void                        unmap() override;
     void                        release_if_unused() override;
     arm_compute::ITensor       &tensor() override;
diff --git a/arm_compute/graph/backends/NEON/NETensorHandle.h b/arm_compute/graph/backends/NEON/NETensorHandle.h
index 99101a8..1df9082 100644
--- a/arm_compute/graph/backends/NEON/NETensorHandle.h
+++ b/arm_compute/graph/backends/NEON/NETensorHandle.h
@@ -25,7 +25,6 @@
 #define ARM_COMPUTE_GRAPH_NETENSORHANDLE_H
 
 #include "arm_compute/graph/ITensorHandle.h"
-
 #include "arm_compute/runtime/Tensor.h"
 
 namespace arm_compute
@@ -51,10 +50,10 @@
     NETensorHandle &operator=(NETensorHandle &&) = default;
 
     // Inherited overridden methods
-    void allocate() override;
-    void free() override;
-    void manage(IMemoryGroup *mg) override;
-    void map(bool blocking) override;
+    void                        allocate() override;
+    void                        free() override;
+    void                        manage(IMemoryGroup *mg) override;
+    void                        map(bool blocking) override;
     void                        unmap() override;
     void                        release_if_unused() override;
     arm_compute::ITensor       &tensor() override;
diff --git a/arm_compute/graph/backends/Utils.h b/arm_compute/graph/backends/Utils.h
index 774ce51..5f4e66c 100644
--- a/arm_compute/graph/backends/Utils.h
+++ b/arm_compute/graph/backends/Utils.h
@@ -42,7 +42,8 @@
  * @return  A configured backend function
  */
 template <typename FunctionType, typename FunctionNameType, typename... ParameterType>
-std::tuple<std::unique_ptr<arm_compute::IFunction>, FunctionNameType> create_named_function(FunctionNameType name, ParameterType... args)
+std::tuple<std::unique_ptr<arm_compute::IFunction>, FunctionNameType> create_named_function(FunctionNameType name,
+                                                                                            ParameterType... args)
 {
     auto f = std::make_unique<FunctionType>();
     f->configure(std::forward<ParameterType>(args)...);
@@ -58,9 +59,8 @@
  * @return  A configured backend function
  */
 template <typename FunctionType, typename FunctionNameType, typename MemoryManagerType, typename... ParameterType>
-std::tuple<std::unique_ptr<arm_compute::IFunction>, FunctionNameType> create_named_memory_managed_function(FunctionNameType name,
-                                                                                                           MemoryManagerType mm,
-                                                                                                           ParameterType... args)
+std::tuple<std::unique_ptr<arm_compute::IFunction>, FunctionNameType>
+create_named_memory_managed_function(FunctionNameType name, MemoryManagerType mm, ParameterType... args)
 {
     auto f = std::make_unique<FunctionType>(mm);
     f->configure(std::forward<ParameterType>(args)...);
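
Both helpers simply instantiate the function type, forward the remaining arguments to configure(), and return the function paired with its name. A hypothetical instantiation (the Neon layer and the tensor pointers are illustrative, not taken from this diff):

    // Hypothetical: build a named, already-configured Neon activation function.
    auto named = create_named_function<NEActivationLayer>(
        std::string("Activation"), input /* ITensor * */, output /* ITensor * */,
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
    std::unique_ptr<arm_compute::IFunction> fn = std::move(std::get<0>(named));
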
diff --git a/arm_compute/graph/backends/ValidateHelpers.h b/arm_compute/graph/backends/ValidateHelpers.h
index 71a6201..0e10294 100644
--- a/arm_compute/graph/backends/ValidateHelpers.h
+++ b/arm_compute/graph/backends/ValidateHelpers.h
@@ -24,14 +24,13 @@
 #ifndef ACL_ARM_COMPUTE_GRAPH_BACKENDS_VALIDATEHELPERS_H
 #define ACL_ARM_COMPUTE_GRAPH_BACKENDS_VALIDATEHELPERS_H
 
-#include "arm_compute/graph/Logger.h"
-#include "arm_compute/graph/Tensor.h"
-#include "arm_compute/graph/Types.h"
-#include "arm_compute/graph/nodes/Nodes.h"
-
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensorInfo.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/nodes/Nodes.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/Types.h"
 
 namespace arm_compute
 {
@@ -63,7 +62,8 @@
 template <typename ArgMinMaxLayer>
 Status validate_arg_min_max_layer(ArgMinMaxLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ArgMinMaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating ArgMinMaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -86,7 +86,8 @@
 template <typename BoundingBoxTransformLayer>
 Status validate_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating BoundingBoxTransformLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating BoundingBoxTransformLayer node with ID : " << node.id() << " and Name: "
+                                                                                         << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -110,7 +111,8 @@
 template <typename ChannelShuffleLayer>
 Status validate_channel_shuffle_layer(ChannelShuffleLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ChannelShuffle node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating ChannelShuffle node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -133,10 +135,14 @@
  *
  * @return Status
  */
-template <typename ConvolutionLayer, typename DirectConvolutionLayer, typename GEMMConvolutionLayer, typename WinogradConvolutionLayer>
+template <typename ConvolutionLayer,
+          typename DirectConvolutionLayer,
+          typename GEMMConvolutionLayer,
+          typename WinogradConvolutionLayer>
 Status validate_convolution_layer(ConvolutionLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -146,7 +152,7 @@
     arm_compute::ITensorInfo *biases  = get_backing_tensor_info(node.input(2));
     arm_compute::ITensorInfo *output  = get_backing_tensor_info(node.output(0));
 
-    if(is_data_type_quantized_asymmetric(input->data_type()))
+    if (is_data_type_quantized_asymmetric(input->data_type()))
     {
         biases->set_data_type(DataType::S32);
     }
@@ -158,23 +164,24 @@
 
     // Validate function
     Status status{};
-    switch(conv_algorithm)
+    switch (conv_algorithm)
     {
         case ConvolutionMethod::Direct:
             ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
             status = DirectConvolutionLayer::validate(input, weights, biases, output, conv_info);
             break;
         case ConvolutionMethod::GEMM:
-            status = GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info,
-                                                    WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), num_groups);
+            status = GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info, WeightsInfo(),
+                                                    Size2D(1, 1), ActivationLayerInfo(), num_groups);
             break;
         case ConvolutionMethod::Winograd:
             ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
-            status = WinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, ActivationLayerInfo(), fast_math);
+            status = WinogradConvolutionLayer::validate(input, weights, biases, output, conv_info,
+                                                        ActivationLayerInfo(), fast_math);
             break;
         case ConvolutionMethod::Default:
-            status = ConvolutionLayer::validate(input, weights, biases, output, conv_info,
-                                                WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), fast_math, num_groups);
+            status = ConvolutionLayer::validate(input, weights, biases, output, conv_info, WeightsInfo(), Size2D(1, 1),
+                                                ActivationLayerInfo(), fast_math, num_groups);
             break;
         default:
             ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported convolution method");
@@ -194,7 +201,8 @@
 template <typename DepthwiseConvolutionLayer>
 Status validate_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: "
+                                                                                         << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -210,7 +218,7 @@
 
     // Validate function
     Status status{};
-    switch(dwc_algorithm)
+    switch (dwc_algorithm)
     {
         case DepthwiseConvolutionMethod::Default:
         case DepthwiseConvolutionMethod::Optimized3x3:
@@ -233,7 +241,8 @@
 template <typename DepthToSpaceLayer>
 Status validate_depth_to_space_layer(DepthToSpaceLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -254,7 +263,8 @@
 template <typename DequantizationLayer>
 Status validate_dequantization_layer(DequantizationLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -275,7 +285,8 @@
 template <typename DetectionOutputLayer>
 Status validate_detection_output_layer(DetectionOutputLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -299,7 +310,8 @@
 template <typename DetectionPostProcessLayer>
 Status validate_detection_post_process_layer(DetectionPostProcessLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionPostProcessLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionPostProcessLayer node with ID : " << node.id() << " and Name: "
+                                                                                         << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 4);
 
@@ -327,7 +339,8 @@
 template <typename GenerateProposalsLayer>
 Status validate_generate_proposals_layer(GenerateProposalsLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating GenerateProposalsLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating GenerateProposalsLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 3);
 
@@ -354,7 +367,8 @@
 template <typename L2NormalizeLayer>
 Status validate_l2_normalize_layer(L2NormalizeLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating L2NormalizeLayerNode node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating L2NormalizeLayerNode node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -379,7 +393,8 @@
 template <typename NormalizePlanarYUVLayer>
 Status validate_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating NormalizePlanarYUVLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating NormalizePlanarYUVLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -404,7 +419,8 @@
 template <typename PadLayer>
 Status validate_pad_layer(PadLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PadLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PadLayer node with ID : " << node.id() << " and Name: " << node.name()
+                                                                        << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -427,14 +443,15 @@
 template <typename PermuteLayer>
 Status validate_permute_layer(PermuteLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PermuteLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PermuteLayer node with ID : " << node.id() << " and Name: " << node.name()
+                                                                            << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
     // Extract IO and info
     arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
     arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
-    const PermutationVector &perm   = node.permutation_vector();
+    const PermutationVector  &perm   = node.permutation_vector();
 
     return PermuteLayer::validate(input, output, perm);
 }
@@ -450,7 +467,8 @@
 template <typename PReluLayer>
 Status validate_prelu_layer(PReluLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PRelu node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PRelu node with ID : " << node.id() << " and Name: " << node.name()
+                                                                     << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -473,7 +491,8 @@
 template <typename PriorBoxLayer>
 Status validate_priorbox_layer(PriorBoxLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PriorBoxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating PriorBoxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -497,7 +516,8 @@
 template <typename QuantizationLayer>
 Status validate_quantization_layer(QuantizationLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating QuantizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating QuantizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -520,7 +540,8 @@
 template <typename ReductionLayer>
 Status validate_reduction_operation_layer(ReductionLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReductionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating ReductionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
 
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
@@ -544,7 +565,8 @@
 template <typename ReorgLayer>
 Status validate_reorg_layer(ReorgLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReorgLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReorgLayer node with ID : " << node.id() << " and Name: " << node.name()
+                                                                          << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -567,7 +589,8 @@
 template <typename ReshapeLayer>
 Status validate_reshape_layer(ReshapeLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name()
+                                                                            << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -590,14 +613,15 @@
 template <typename ROIAlignLayer>
 Status validate_roi_align_layer(ROIAlignLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ROIAlignLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Validating ROIAlignLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
     // Extract input and output
-    arm_compute::ITensorInfo *input     = detail::get_backing_tensor_info(node.input(0));
-    arm_compute::ITensorInfo *rois      = detail::get_backing_tensor_info(node.input(1));
-    arm_compute::ITensorInfo *output    = detail::get_backing_tensor_info(node.output(0));
+    arm_compute::ITensorInfo  *input     = detail::get_backing_tensor_info(node.input(0));
+    arm_compute::ITensorInfo  *rois      = detail::get_backing_tensor_info(node.input(1));
+    arm_compute::ITensorInfo  *output    = detail::get_backing_tensor_info(node.output(0));
     const ROIPoolingLayerInfo &pool_info = node.pooling_info();
 
     // Validate function
@@ -615,7 +639,8 @@
 template <typename SliceLayer>
 Status validate_slice_layer(SliceLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating Slice node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating Slice node with ID : " << node.id() << " and Name: " << node.name()
+                                                                     << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -639,7 +664,8 @@
 template <typename StridedSliceLayer>
 Status validate_strided_slice_layer(StridedSliceLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating StridedSlice node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating StridedSlice node with ID : " << node.id() << " and Name: " << node.name()
+                                                                            << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -651,7 +677,8 @@
     const BiStrides             strides = node.strides();
     const StridedSliceLayerInfo info    = node.strided_slice_info();
 
-    return StridedSliceLayer::validate(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
+    return StridedSliceLayer::validate(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(),
+                                       info.shrink_axis_mask());
 }
 
 /** Validates an element-wise layer node
@@ -663,7 +690,8 @@
 template <typename EltwiseLayerFunctions>
 Status validate_eltwise_Layer(EltwiseLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name()
+                                                                            << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -678,23 +706,24 @@
     const QuantizationInfo          quant_info     = node.output_quant_info();
 
     // Validate function
-    if(eltwise_op == EltwiseOperation::Add)
+    if (eltwise_op == EltwiseOperation::Add)
     {
         return EltwiseLayerFunctions::ArithmeticAddition::validate(input1, input2, output, convert_policy, act_info);
     }
-    else if(eltwise_op == EltwiseOperation::Sub)
+    else if (eltwise_op == EltwiseOperation::Sub)
     {
         return EltwiseLayerFunctions::ArithmeticSubtraction::validate(input1, input2, output, convert_policy, act_info);
     }
-    else if(eltwise_op == EltwiseOperation::Mul)
+    else if (eltwise_op == EltwiseOperation::Mul)
     {
-        return EltwiseLayerFunctions::PixelWiseMultiplication::validate(input1, input2, output, 1.0f, convert_policy, round_policy, act_info);
+        return EltwiseLayerFunctions::PixelWiseMultiplication::validate(input1, input2, output, 1.0f, convert_policy,
+                                                                        round_policy, act_info);
     }
-    else if(eltwise_op == EltwiseOperation::Max)
+    else if (eltwise_op == EltwiseOperation::Max)
     {
         return EltwiseLayerFunctions::ElementwiseMax::validate(input1, input2, output, act_info);
     }
-    else if(eltwise_op == EltwiseOperation::Div)
+    else if (eltwise_op == EltwiseOperation::Div)
     {
         return EltwiseLayerFunctions::ArithmeticDivision::validate(input1, input2, output, act_info);
     }
@@ -713,7 +742,8 @@
 template <typename UnaryEltwiseLayerFunctions>
 Status validate_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
 {
-    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name()
+                                                                            << std::endl);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
@@ -723,7 +753,7 @@
     const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;
 
     // Validate function
-    if(eltwise_op == UnaryEltwiseOperation::Exp)
+    if (eltwise_op == UnaryEltwiseOperation::Exp)
     {
         return UnaryEltwiseLayerFunctions::ExpLayer::validate(input, output);
     }
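
All of the validate_* helpers above are templates over a backend's concrete layer types, so each backend supplies the instantiation from its node validator. A hypothetical Neon-style call for the convolution validator (conv_node and the four layer types are illustrative, not taken from this diff):

    // Hypothetical: check a ConvolutionLayerNode against the Neon layer
    // implementations; conv_node is assumed to be a ConvolutionLayerNode&.
    Status status = detail::validate_convolution_layer<NEConvolutionLayer,
                                                       NEDirectConvolutionLayer,
                                                       NEGEMMConvolutionLayer,
                                                       NEWinogradConvolutionLayer>(conv_node);
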
diff --git a/arm_compute/graph/frontend/IStream.h b/arm_compute/graph/frontend/IStream.h
index f69d543..1831ac0 100644
--- a/arm_compute/graph/frontend/IStream.h
+++ b/arm_compute/graph/frontend/IStream.h
@@ -84,8 +84,8 @@
     }
 
 protected:
-    StreamHints _hints     = {};              /**< Execution and algorithmic hints */
-    NodeID      _tail_node = { EmptyNodeID }; /**< NodeID pointing to the last(tail) node of the graph */
+    StreamHints _hints     = {};            /**< Execution and algorithmic hints */
+    NodeID      _tail_node = {EmptyNodeID}; /**< NodeID pointing to the last (tail) node of the graph */
 };
 } // namespace frontend
 } // namespace graph
diff --git a/arm_compute/graph/frontend/Layers.h b/arm_compute/graph/frontend/Layers.h
index fe0539b..bd321e6 100644
--- a/arm_compute/graph/frontend/Layers.h
+++ b/arm_compute/graph/frontend/Layers.h
@@ -24,13 +24,12 @@
 #ifndef ARM_COMPUTE_GRAPH_LAYERS_H
 #define ARM_COMPUTE_GRAPH_LAYERS_H
 
-#include "arm_compute/graph/GraphBuilder.h"
-#include "arm_compute/graph/Types.h"
+#include "arm_compute/core/utils/misc/Utility.h"
 #include "arm_compute/graph/frontend/ILayer.h"
 #include "arm_compute/graph/frontend/IStream.h"
 #include "arm_compute/graph/frontend/SubStream.h"
-
-#include "arm_compute/core/utils/misc/Utility.h"
+#include "arm_compute/graph/GraphBuilder.h"
+#include "arm_compute/graph/Types.h"
 
 #include <memory>
 #include <string>
@@ -50,14 +49,13 @@
      * @param[in] desc     Description of input tensor.
      * @param[in] accessor Accessor to get input tensor data from.
      */
-    InputLayer(TensorDescriptor desc, ITensorAccessorUPtr accessor)
-        : _desc(desc), _accessor(std::move(accessor))
+    InputLayer(TensorDescriptor desc, ITensorAccessorUPtr accessor) : _desc(desc), _accessor(std::move(accessor))
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams common_params = { name(), s.hints().target_hint };
+        NodeParams common_params = {name(), s.hints().target_hint};
         return GraphBuilder::add_input_node(s.graph(), common_params, _desc, std::move(_accessor));
     }
 
@@ -75,14 +73,13 @@
      * @param[in] desc     Description of input tensor.
      * @param[in] accessor Accessor to get input tensor data from.
      */
-    ConstantLayer(TensorDescriptor desc, ITensorAccessorUPtr accessor)
-        : _desc(desc), _accessor(std::move(accessor))
+    ConstantLayer(TensorDescriptor desc, ITensorAccessorUPtr accessor) : _desc(desc), _accessor(std::move(accessor))
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams common_params = { name(), s.hints().target_hint };
+        NodeParams common_params = {name(), s.hints().target_hint};
         return GraphBuilder::add_const_node(s.graph(), common_params, _desc, std::move(_accessor));
     }
 
@@ -107,8 +104,8 @@
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), _connection_idx };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), _connection_idx};
         return GraphBuilder::add_output_node(s.graph(), common_params, input, std::move(_accessor));
     }
 
@@ -126,18 +123,17 @@
      * @param[in] act_info       Activation information
      * @param[in] out_quant_info (Optional) Output quantization info
      */
-    ActivationLayer(ActivationLayerInfo    act_info,
-                    const QuantizationInfo out_quant_info = QuantizationInfo())
-        : _act_info(act_info),
-          _out_quant_info(std::move(out_quant_info))
+    ActivationLayer(ActivationLayerInfo act_info, const QuantizationInfo out_quant_info = QuantizationInfo())
+        : _act_info(act_info), _out_quant_info(std::move(out_quant_info))
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
-        return GraphBuilder::add_activation_node(s.graph(), common_params, input, _act_info, std::move(_out_quant_info));
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
+        return GraphBuilder::add_activation_node(s.graph(), common_params, input, _act_info,
+                                                 std::move(_out_quant_info));
     }
 
 private:
@@ -160,10 +156,7 @@
                    unsigned int           axis,
                    DataType               out_data_type  = DataType::UNKNOWN,
                    const QuantizationInfo out_quant_info = QuantizationInfo())
-        : _op(op),
-          _axis(axis),
-          _out_data_type(out_data_type),
-          _out_quant_info(std::move(out_quant_info))
+        : _op(op), _axis(axis), _out_data_type(out_data_type), _out_quant_info(std::move(out_quant_info))
     {
     }
 
@@ -175,9 +168,10 @@
      */
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
-        return GraphBuilder::add_arg_min_max_node(s.graph(), common_params, input, _op, _axis, _out_data_type, std::move(_out_quant_info));
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
+        return GraphBuilder::add_arg_min_max_node(s.graph(), common_params, input, _op, _axis, _out_data_type,
+                                                  std::move(_out_quant_info));
     }
 
 private:
@@ -204,7 +198,11 @@
                             ITensorAccessorUPtr gamma   = nullptr,
                             ITensorAccessorUPtr beta    = nullptr,
                             float               epsilon = 0.001f)
-        : _mean(std::move(mean)), _var(std::move(var)), _gamma(std::move(gamma)), _beta(std::move(beta)), _epsilon(epsilon)
+        : _mean(std::move(mean)),
+          _var(std::move(var)),
+          _gamma(std::move(gamma)),
+          _beta(std::move(beta)),
+          _epsilon(epsilon)
     {
     }
 
@@ -213,10 +211,10 @@
         ARM_COMPUTE_ERROR_ON(_mean == nullptr);
         ARM_COMPUTE_ERROR_ON(_var == nullptr);
 
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
-        return GraphBuilder::add_batch_normalization_node(s.graph(), common_params, input, _epsilon,
-                                                          std::move(_mean), std::move(_var), std::move(_beta), std::move(_gamma));
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
+        return GraphBuilder::add_batch_normalization_node(s.graph(), common_params, input, _epsilon, std::move(_mean),
+                                                          std::move(_var), std::move(_beta), std::move(_gamma));
     }
 
 private:
@@ -237,7 +235,9 @@
      * @param[in] sub_stream_deltas Graph sub-stream for the deltas
      * @param[in] info              Contains BoundingBox operation information described in @ref BoundingBoxTransformInfo.
      */
-    BoundingBoxTransformLayer(SubStream &&sub_stream_input, SubStream &&sub_stream_deltas, BoundingBoxTransformInfo info)
+    BoundingBoxTransformLayer(SubStream              &&sub_stream_input,
+                              SubStream              &&sub_stream_deltas,
+                              BoundingBoxTransformInfo info)
         : _ss_input(sub_stream_input), _ss_deltas(sub_stream_deltas), _bbox_info(info)
     {
     }
@@ -250,9 +250,9 @@
      */
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { _ss_input.tail_node(), 0 };
-        NodeIdxPair deltas        = { _ss_deltas.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {_ss_input.tail_node(), 0};
+        NodeIdxPair deltas        = {_ss_deltas.tail_node(), 0};
         return GraphBuilder::add_bounding_box_transform_node(s.graph(), common_params, input, deltas, _bbox_info);
     }
 
@@ -270,15 +270,14 @@
      *
      * @param[in] num_groups Number of groups
      */
-    ChannelShuffleLayer(unsigned int num_groups)
-        : _num_groups(num_groups)
+    ChannelShuffleLayer(unsigned int num_groups) : _num_groups(num_groups)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_channel_shuffle_node(s.graph(), common_params, input, _num_groups);
     }
 
@@ -297,17 +296,15 @@
      * @param[in] rest_sub_streams Remaining sub-graph branches
      */
     template <typename... Ts>
-    ConcatLayer(SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
+    ConcatLayer(SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&...rest_sub_streams)
         : _sub_streams(), _concat_descriptor(DataLayoutDimension::CHANNEL)
     {
         _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream1)));
         _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream2)));
 
-        utility::for_each([&](SubStream && sub_stream)
-        {
-            _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
-        },
-        std::move(rest_sub_streams)...);
+        utility::for_each([&](SubStream &&sub_stream)
+                          { _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream))); },
+                          std::move(rest_sub_streams)...);
     }
     /** Construct a concatenation layer
      *
@@ -317,33 +314,33 @@
      * @param[in] rest_sub_streams  Remaining sub-graph branches
      */
     template <typename... Ts>
-    ConcatLayer(descriptors::ConcatLayerDescriptor concat_descriptor, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
+    ConcatLayer(descriptors::ConcatLayerDescriptor concat_descriptor,
+                SubStream                        &&sub_stream1,
+                SubStream                        &&sub_stream2,
+                Ts &&...rest_sub_streams)
         : _sub_streams(), _concat_descriptor(concat_descriptor)
     {
         _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream1)));
         _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream2)));
 
-        utility::for_each([&](SubStream && sub_stream)
-        {
-            _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
-        },
-        std::move(rest_sub_streams)...);
+        utility::for_each([&](SubStream &&sub_stream)
+                          { _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream))); },
+                          std::move(rest_sub_streams)...);
     }
     /** Construct a concat layer
      *
      * @param[in] sub_stream Sub-stream
      */
     template <typename... Ts>
-    ConcatLayer(SubStream &&sub_stream)
-        : _sub_streams(), _concat_descriptor(DataLayoutDimension::CHANNEL)
+    ConcatLayer(SubStream &&sub_stream) : _sub_streams(), _concat_descriptor(DataLayoutDimension::CHANNEL)
     {
         _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
     }
     NodeID create_layer(IStream &s) override
     {
         NodeID     nid           = EmptyNodeID;
-        NodeParams common_params = { name(), s.hints().target_hint };
-        if(_sub_streams.size() == 1 && _sub_streams.at(0) != nullptr)
+        NodeParams common_params = {name(), s.hints().target_hint};
+        if (_sub_streams.size() == 1 && _sub_streams.at(0) != nullptr)
         {
             nid = _sub_streams[0]->tail_node();
         }
@@ -351,14 +348,14 @@
         {
             // Collect tail nodes and concatenate
             std::vector<NodeIdxPair> nodes;
-            for(auto &ss : _sub_streams)
+            for (auto &ss : _sub_streams)
             {
-                if(ss && (ss->tail_node() != EmptyNodeID))
+                if (ss && (ss->tail_node() != EmptyNodeID))
                 {
                     const auto tail_node = s.graph().node(ss->tail_node());
-                    if(tail_node != nullptr && tail_node->type() != NodeType::Output)
+                    if (tail_node != nullptr && tail_node->type() != NodeType::Output)
                     {
-                        nodes.push_back({ ss->tail_node(), 0 });
+                        nodes.push_back({ss->tail_node(), 0});
                     }
                 }
             }
@@ -411,12 +408,12 @@
 
     NodeID create_layer(IStream &s) override
     {
-        NodeIdxPair input         = { s.tail_node(), 0 };
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        return GraphBuilder::add_convolution_node(s.graph(), common_params, input,
-                                                  Size2D(_conv_width, _conv_height), _ofm, _conv_info, _num_groups,
-                                                  s.hints().convolution_method_hint, s.hints().fast_math_hint,
-                                                  std::move(_weights), std::move(_bias), std::move(_weights_quant_info), std::move(_out_quant_info));
+        NodeIdxPair input         = {s.tail_node(), 0};
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        return GraphBuilder::add_convolution_node(s.graph(), common_params, input, Size2D(_conv_width, _conv_height),
+                                                  _ofm, _conv_info, _num_groups, s.hints().convolution_method_hint,
+                                                  s.hints().fast_math_hint, std::move(_weights), std::move(_bias),
+                                                  std::move(_weights_quant_info), std::move(_out_quant_info));
     }
 
 private:
@@ -461,11 +458,10 @@
 
     NodeID create_layer(IStream &s) override
     {
-        NodeIdxPair input         = { s.tail_node(), 0 };
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        return GraphBuilder::add_deconvolution_node(s.graph(), common_params, input,
-                                                    Size2D(_conv_width, _conv_height), _ofm, _deconv_info,
-                                                    std::move(_weights), std::move(_bias));
+        NodeIdxPair input         = {s.tail_node(), 0};
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        return GraphBuilder::add_deconvolution_node(s.graph(), common_params, input, Size2D(_conv_width, _conv_height),
+                                                    _ofm, _deconv_info, std::move(_weights), std::move(_bias));
     }
 
 private:
@@ -513,12 +509,12 @@
 
     NodeID create_layer(IStream &s) override
     {
-        NodeIdxPair input         = { s.tail_node(), 0 };
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        return GraphBuilder::add_depthwise_convolution_node(s.graph(), common_params,
-                                                            input, Size2D(_conv_width, _conv_height), _conv_info, _depth_multiplier,
-                                                            s.hints().depthwise_convolution_method_hint,
-                                                            std::move(_weights), std::move(_bias), std::move(_weights_quant_info), std::move(_out_quant_info));
+        NodeIdxPair input         = {s.tail_node(), 0};
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        return GraphBuilder::add_depthwise_convolution_node(
+            s.graph(), common_params, input, Size2D(_conv_width, _conv_height), _conv_info, _depth_multiplier,
+            s.hints().depthwise_convolution_method_hint, std::move(_weights), std::move(_bias),
+            std::move(_weights_quant_info), std::move(_out_quant_info));
     }
 
 private:
@@ -540,15 +536,14 @@
      *
      * @param[in] block_shape Block size to rearrange
      */
-    DepthToSpaceLayer(int32_t block_shape)
-        : _block_shape(block_shape)
+    DepthToSpaceLayer(int32_t block_shape) : _block_shape(block_shape)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_depth_to_space_node(s.graph(), common_params, input, _block_shape);
     }
 
@@ -569,8 +564,8 @@
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_dequantization_node(s.graph(), common_params, input);
     }
 };
@@ -585,18 +580,21 @@
      * @param[in] sub_stream_prior PriorBox graph sub-stream.
      * @param[in] detect_info      DetectionOutput parameters.
      */
-    DetectionOutputLayer(SubStream &&sub_stream_conf, SubStream &&sub_stream_prior, const DetectionOutputLayerInfo &detect_info)
+    DetectionOutputLayer(SubStream                     &&sub_stream_conf,
+                         SubStream                     &&sub_stream_prior,
+                         const DetectionOutputLayerInfo &detect_info)
         : _ss_conf(std::move(sub_stream_conf)), _ss_prior(std::move(sub_stream_prior)), _detect_info(detect_info)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params  = { name(), s.hints().target_hint };
-        NodeIdxPair input_loc      = { s.tail_node(), 0 };
-        NodeIdxPair input_conf     = { _ss_conf.tail_node(), 0 };
-        NodeIdxPair input_priorbox = { _ss_prior.tail_node(), 0 };
-        return GraphBuilder::add_detection_output_node(s.graph(), common_params, input_loc, input_conf, input_priorbox, _detect_info);
+        NodeParams  common_params  = {name(), s.hints().target_hint};
+        NodeIdxPair input_loc      = {s.tail_node(), 0};
+        NodeIdxPair input_conf     = {_ss_conf.tail_node(), 0};
+        NodeIdxPair input_priorbox = {_ss_prior.tail_node(), 0};
+        return GraphBuilder::add_detection_output_node(s.graph(), common_params, input_loc, input_conf, input_priorbox,
+                                                       _detect_info);
     }
 
 private:
@@ -615,9 +613,14 @@
      * @param[in] anchors                     Accessor to get anchors tensor data from.
      * @param[in] out_quant_info              (Optional) Output quantization info
      */
-    DetectionPostProcessLayer(SubStream &&sub_stream_class_prediction, DetectionPostProcessLayerInfo detect_info, ITensorAccessorUPtr anchors,
-                              const QuantizationInfo out_quant_info = QuantizationInfo())
-        : _sub_stream_class_prediction(std::move(sub_stream_class_prediction)), _detect_info(detect_info), _anchors(std::move(anchors)), _out_quant_info(std::move(out_quant_info))
+    DetectionPostProcessLayer(SubStream                   &&sub_stream_class_prediction,
+                              DetectionPostProcessLayerInfo detect_info,
+                              ITensorAccessorUPtr           anchors,
+                              const QuantizationInfo        out_quant_info = QuantizationInfo())
+        : _sub_stream_class_prediction(std::move(sub_stream_class_prediction)),
+          _detect_info(detect_info),
+          _anchors(std::move(anchors)),
+          _out_quant_info(std::move(out_quant_info))
     {
     }
 
@@ -625,10 +628,12 @@
     {
         ARM_COMPUTE_ERROR_ON(_anchors == nullptr);
 
-        NodeParams  common_params          = { name(), s.hints().target_hint };
-        NodeIdxPair input_box_encoding     = { s.tail_node(), 0 };
-        NodeIdxPair input_class_prediction = { _sub_stream_class_prediction.tail_node(), 0 };
-        return GraphBuilder::add_detection_post_process_node(s.graph(), common_params, input_box_encoding, input_class_prediction, _detect_info, std::move(_anchors), std::move(_out_quant_info));
+        NodeParams  common_params          = {name(), s.hints().target_hint};
+        NodeIdxPair input_box_encoding     = {s.tail_node(), 0};
+        NodeIdxPair input_class_prediction = {_sub_stream_class_prediction.tail_node(), 0};
+        return GraphBuilder::add_detection_post_process_node(s.graph(), common_params, input_box_encoding,
+                                                             input_class_prediction, _detect_info, std::move(_anchors),
+                                                             std::move(_out_quant_info));
     }
 
 private:
@@ -645,15 +650,14 @@
      *
      * @param[in] shape Output shape
      */
-    DummyLayer(TensorShape shape)
-        : _shape(shape)
+    DummyLayer(TensorShape shape) : _shape(shape)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_dummy_node(s.graph(), common_params, input, _shape);
     }
 
@@ -677,9 +681,9 @@
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input0        = { _ss0.tail_node(), 0 };
-        NodeIdxPair input1        = { _ss1.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input0        = {_ss0.tail_node(), 0};
+        NodeIdxPair input1        = {_ss1.tail_node(), 0};
 
         return GraphBuilder::add_elementwise_node(s.graph(), common_params, input0, input1, _op);
     }
@@ -700,8 +704,8 @@
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_flatten_node(s.graph(), common_params, input);
     }
 };
@@ -770,13 +774,13 @@
      */
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
-        if(_weights != nullptr)
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
+        if (_weights != nullptr)
         {
-            return GraphBuilder::add_fully_connected_layer(s.graph(), common_params, input, _num_outputs,
-                                                           std::move(_weights), std::move(_bias), _fc_info,
-                                                           std::move(_weights_quant_info), std::move(_out_quant_info), s.hints().fast_math_hint);
+            return GraphBuilder::add_fully_connected_layer(
+                s.graph(), common_params, input, _num_outputs, std::move(_weights), std::move(_bias), _fc_info,
+                std::move(_weights_quant_info), std::move(_out_quant_info), s.hints().fast_math_hint);
         }
         else
         {
@@ -811,8 +815,14 @@
      * @param[in] ss_anchors Graph sub-stream for the anchors.
      * @param[in] info       Generate Proposals operation information.
      */
-    GenerateProposalsLayer(SubStream &&ss_scores, SubStream &&ss_deltas, SubStream &&ss_anchors, GenerateProposalsInfo info)
-        : _ss_scores(std::move(ss_scores)), _ss_deltas(std::move(ss_deltas)), _ss_anchors(std::move(ss_anchors)), _info(info)
+    GenerateProposalsLayer(SubStream           &&ss_scores,
+                           SubStream           &&ss_deltas,
+                           SubStream           &&ss_anchors,
+                           GenerateProposalsInfo info)
+        : _ss_scores(std::move(ss_scores)),
+          _ss_deltas(std::move(ss_deltas)),
+          _ss_anchors(std::move(ss_anchors)),
+          _info(info)
     {
     }
 
@@ -824,10 +834,10 @@
      */
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair scores        = { _ss_scores.tail_node(), 0 };
-        NodeIdxPair deltas        = { _ss_deltas.tail_node(), 0 };
-        NodeIdxPair anchors       = { _ss_anchors.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair scores        = {_ss_scores.tail_node(), 0};
+        NodeIdxPair deltas        = {_ss_deltas.tail_node(), 0};
+        NodeIdxPair anchors       = {_ss_anchors.tail_node(), 0};
         return GraphBuilder::add_generate_proposals_node(s.graph(), common_params, scores, deltas, anchors, _info);
     }
 
@@ -847,15 +857,14 @@
      * @param[in] axis    Axis to perform normalization on
      * @param[in] epsilon Lower bound value for the normalization
      */
-    L2NormalizeLayer(int axis, float epsilon)
-        : _axis(axis), _epsilon(epsilon)
+    L2NormalizeLayer(int axis, float epsilon) : _axis(axis), _epsilon(epsilon)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_l2_normalize_node(s.graph(), common_params, input, _axis, _epsilon);
     }
 
@@ -872,15 +881,14 @@
      *
      * @param[in] norm_info Normalization information.
      */
-    NormalizationLayer(NormalizationLayerInfo norm_info)
-        : _norm_info(norm_info)
+    NormalizationLayer(NormalizationLayerInfo norm_info) : _norm_info(norm_info)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_normalization_node(s.graph(), common_params, input, _norm_info);
     }
 
@@ -897,8 +905,7 @@
      * @param[in] mean Accessor to get mean tensor data from.
      * @param[in] std  Accessor to get std tensor data from.
      */
-    NormalizePlanarYUVLayer(ITensorAccessorUPtr mean,
-                            ITensorAccessorUPtr std)
+    NormalizePlanarYUVLayer(ITensorAccessorUPtr mean, ITensorAccessorUPtr std)
         : _mean(std::move(mean)), _std(std::move(std))
     {
     }
@@ -908,10 +915,10 @@
         ARM_COMPUTE_ERROR_ON(_mean == nullptr);
         ARM_COMPUTE_ERROR_ON(_std == nullptr);
 
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
-        return GraphBuilder::add_normalize_planar_yuv_node(s.graph(), common_params, input,
-                                                           std::move(_mean), std::move(_std));
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
+        return GraphBuilder::add_normalize_planar_yuv_node(s.graph(), common_params, input, std::move(_mean),
+                                                           std::move(_std));
     }
 
 private:
@@ -929,15 +936,14 @@
      *                      specifies the front and the end padding in the i-th dimension.
      * @param[in] pad_value Padding value to use. Defaults to 0.
      */
-    PadLayer(PaddingList padding, PixelValue pad_value = PixelValue())
-        : _padding(padding), _pad_value(pad_value)
+    PadLayer(PaddingList padding, PixelValue pad_value = PixelValue()) : _padding(padding), _pad_value(pad_value)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_pad_node(s.graph(), common_params, input, _padding, _pad_value);
     }
 
@@ -956,15 +962,14 @@
      * @param[in] layout (Optional) Data layout to assign to permuted tensor.
      *                   If UNKNOWN then the input's layout will be used.
      */
-    PermuteLayer(PermutationVector perm, DataLayout layout = DataLayout::UNKNOWN)
-        : _perm(perm), _layout(layout)
+    PermuteLayer(PermutationVector perm, DataLayout layout = DataLayout::UNKNOWN) : _perm(perm), _layout(layout)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_permute_node(s.graph(), common_params, input, _perm, _layout);
     }
 
@@ -981,15 +986,14 @@
      *
      * @param[in] pool_info Pooling information.
      */
-    PoolingLayer(PoolingLayerInfo pool_info)
-        : _pool_info(pool_info)
+    PoolingLayer(PoolingLayerInfo pool_info) : _pool_info(pool_info)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_pooling_node(s.graph(), common_params, input, _pool_info);
     }
 
@@ -1013,9 +1017,9 @@
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { _ss0.tail_node(), 0 };
-        NodeIdxPair alpha         = { _ss1.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {_ss0.tail_node(), 0};
+        NodeIdxPair alpha         = {_ss1.tail_node(), 0};
 
         return GraphBuilder::add_prelu_node(s.graph(), common_params, input, alpha);
     }
@@ -1064,15 +1068,17 @@
      * @param[in] format_info (Optional) Format info.
      * @param[in] transform   (Optional) Input transform function.
      */
-    PrintLayer(std::ostream &stream, const IOFormatInfo &format_info = IOFormatInfo(), const std::function<ITensor *(ITensor *)> transform = nullptr)
+    PrintLayer(std::ostream                             &stream,
+               const IOFormatInfo                       &format_info = IOFormatInfo(),
+               const std::function<ITensor *(ITensor *)> transform   = nullptr)
         : _stream(stream), _format_info(format_info), _transform(transform)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_print_node(s.graph(), common_params, input, _stream, _format_info, _transform);
     }
 
@@ -1098,9 +1104,9 @@
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input0        = { s.tail_node(), 0 };
-        NodeIdxPair input1        = { _ss.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input0        = {s.tail_node(), 0};
+        NodeIdxPair input1        = {_ss.tail_node(), 0};
         return GraphBuilder::add_priorbox_node(s.graph(), common_params, input0, input1, _prior_info);
     }
 
@@ -1117,15 +1123,14 @@
      *
      * @param[in] out_quant_info Output tensor quantization info
      */
-    QuantizationLayer(QuantizationInfo out_quant_info)
-        : _out_quant_info(out_quant_info)
+    QuantizationLayer(QuantizationInfo out_quant_info) : _out_quant_info(out_quant_info)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_quantization_node(s.graph(), common_params, input, _out_quant_info);
     }
 
@@ -1150,8 +1155,8 @@
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_reduction_operation_node(s.graph(), common_params, input, _op, _axis, _keep_dims);
     }
 
@@ -1170,15 +1175,14 @@
      * @param[in] stride Stride value to use for reorganizing the values in the output tensor.
      *                   It defines the spatial distance between two consecutive pixels in the x and y directions.
      */
-    ReorgLayer(int stride)
-        : _stride(stride)
+    ReorgLayer(int stride) : _stride(stride)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_reorg_node(s.graph(), common_params, input, _stride);
     }
 
@@ -1194,15 +1198,14 @@
      *
      * @param[in] shape Target shape.
      */
-    ReshapeLayer(TensorShape shape)
-        : _shape(shape)
+    ReshapeLayer(TensorShape shape) : _shape(shape)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_reshape_node(s.graph(), common_params, input, _shape);
     }
 
@@ -1221,8 +1224,8 @@
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_resize_node(s.graph(), common_params, input, _policy, _width_scale, _height_scale);
     }
 
@@ -1254,9 +1257,9 @@
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { _ss_input.tail_node(), 0 };
-        NodeIdxPair rois          = { _ss_rois.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {_ss_input.tail_node(), 0};
+        NodeIdxPair rois          = {_ss_rois.tail_node(), 0};
         return GraphBuilder::add_roi_align_node(s.graph(), common_params, input, rois, _pool_info);
     }
 
@@ -1275,16 +1278,15 @@
      * @param[in] mul_w Accessor to get mul weight from.
      * @param[in] add_w Accessor to get add weight from.
      */
-    ScaleLayer(ITensorAccessorUPtr mul_w,
-               ITensorAccessorUPtr add_w)
+    ScaleLayer(ITensorAccessorUPtr mul_w, ITensorAccessorUPtr add_w)
         : _mul_w(std::move(mul_w)), _add_w(std::move(add_w))
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_scale_layer(s.graph(), common_params, input, std::move(_mul_w), std::move(_add_w));
     }
 
@@ -1302,15 +1304,14 @@
      * @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must equal rank(input).
      * @param[in] ends   The ends of the dimensions of the input tensor to be sliced. The length must equal rank(input).
      */
-    SliceLayer(Coordinates &starts, Coordinates &ends)
-        : _starts(starts), _ends(ends)
+    SliceLayer(Coordinates &starts, Coordinates &ends) : _starts(starts), _ends(ends)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_slice_node(s.graph(), common_params, input, _starts, _ends);
     }
 
@@ -1327,15 +1328,14 @@
      *
      * @param[in] beta (Optional) Beta value. Default 1.0.
      */
-    SoftmaxLayer(float beta = 1.0f)
-        : _beta(beta)
+    SoftmaxLayer(float beta = 1.0f) : _beta(beta)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_softmax_node(s.graph(), common_params, input, _beta);
     }
 
@@ -1354,17 +1354,14 @@
      * @param[in] rest_sub_streams Remaining sub-graph branches
      */
     template <typename... Ts>
-    StackLayer(SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
-        : _sub_streams(), _axis(0)
+    StackLayer(SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&...rest_sub_streams) : _sub_streams(), _axis(0)
     {
         _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream1)));
         _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream2)));
 
-        utility::for_each([&](SubStream && sub_stream)
-        {
-            _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
-        },
-        std::move(rest_sub_streams)...);
+        utility::for_each([&](SubStream &&sub_stream)
+                          { _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream))); },
+                          std::move(rest_sub_streams)...);
     }
     /** Construct a stack layer
      *
@@ -1374,33 +1371,30 @@
      * @param[in] rest_sub_streams Remaining sub-graph branches
      */
     template <typename... Ts>
-    StackLayer(int axis, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
+    StackLayer(int axis, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&...rest_sub_streams)
         : _sub_streams(), _axis(axis)
     {
         _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream1)));
         _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream2)));
 
-        utility::for_each([&](SubStream && sub_stream)
-        {
-            _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
-        },
-        std::move(rest_sub_streams)...);
+        utility::for_each([&](SubStream &&sub_stream)
+                          { _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream))); },
+                          std::move(rest_sub_streams)...);
     }
     /** Construct a stack layer
      *
      * @param[in] sub_stream Sub-stream
      */
     template <typename... Ts>
-    StackLayer(SubStream &&sub_stream)
-        : _sub_streams(), _axis(0)
+    StackLayer(SubStream &&sub_stream) : _sub_streams(), _axis(0)
     {
         _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
     }
     NodeID create_layer(IStream &s) override
     {
         NodeID     nid           = EmptyNodeID;
-        NodeParams common_params = { name(), s.hints().target_hint };
-        if(_sub_streams.size() == 1 && _sub_streams.at(0) != nullptr)
+        NodeParams common_params = {name(), s.hints().target_hint};
+        if (_sub_streams.size() == 1 && _sub_streams.at(0) != nullptr)
         {
             nid = _sub_streams[0]->tail_node();
         }
@@ -1408,14 +1402,14 @@
         {
             // Collect tail nodes and stack
             std::vector<NodeIdxPair> nodes;
-            for(auto &ss : _sub_streams)
+            for (auto &ss : _sub_streams)
             {
-                if(ss && (ss->tail_node() != EmptyNodeID))
+                if (ss && (ss->tail_node() != EmptyNodeID))
                 {
                     const auto tail_node = s.graph().node(ss->tail_node());
-                    if(tail_node != nullptr && tail_node->type() != NodeType::Output)
+                    if (tail_node != nullptr && tail_node->type() != NodeType::Output)
                     {
-                        nodes.push_back({ ss->tail_node(), 0 });
+                        nodes.push_back({ss->tail_node(), 0});
                     }
                 }
             }
@@ -1440,15 +1434,18 @@
      * @param[in] strides            The strides of the dimensions of the input tensor to be sliced. The length must equal rank(input).
      * @param[in] strided_slice_info Contains masks for the starts, ends and strides
      */
-    StridedSliceLayer(Coordinates &starts, Coordinates &ends, BiStrides &strides, StridedSliceLayerInfo strided_slice_info)
+    StridedSliceLayer(Coordinates          &starts,
+                      Coordinates          &ends,
+                      BiStrides            &strides,
+                      StridedSliceLayerInfo strided_slice_info)
         : _starts(starts), _ends(ends), _strides(strides), _info(strided_slice_info)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_strided_slice_node(s.graph(), common_params, input, _starts, _ends, _strides, _info);
     }
 
@@ -1467,15 +1464,14 @@
      *
      * @param[in] act_info Activation info
      */
-    YOLOLayer(ActivationLayerInfo act_info)
-        : _act_info(act_info)
+    YOLOLayer(ActivationLayerInfo act_info) : _act_info(act_info)
     {
     }
 
     NodeID create_layer(IStream &s) override
     {
-        NodeParams  common_params = { name(), s.hints().target_hint };
-        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = {name(), s.hints().target_hint};
+        NodeIdxPair input         = {s.tail_node(), 0};
         return GraphBuilder::add_yolo_node(s.graph(), common_params, input, _act_info);
     }
 
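
For context: the layer classes reformatted above are the building blocks of the graph
frontend's streaming API. The following is a minimal usage sketch, not code from this
patch; the toy network, the shapes, and the nullptr accessor arguments are illustrative
placeholders (a real graph passes ITensorAccessor implementations that supply input
data and trained weights).

#include "arm_compute/graph.h"

using namespace arm_compute::graph::frontend;

int main()
{
    Stream graph(0, "frontend_example");

    // Toy network; every accessor is left as a nullptr placeholder.
    graph << Target::NEON
          << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32), nullptr)
          << ConvolutionLayer(3U, 3U, 16U, nullptr, nullptr, PadStrideInfo(1, 1, 1, 1))
          << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 2, DataLayout::NCHW))
          << OutputLayer(nullptr);

    graph.finalize(Target::NEON, GraphConfig());
    graph.run();
    return 0;
}
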
diff --git a/arm_compute/graph/frontend/Stream.h b/arm_compute/graph/frontend/Stream.h
index db22f6d..7e760b6 100644
--- a/arm_compute/graph/frontend/Stream.h
+++ b/arm_compute/graph/frontend/Stream.h
@@ -27,7 +27,6 @@
 #include "arm_compute/graph/frontend/IStream.h"
 #include "arm_compute/graph/frontend/IStreamOperators.h"
 #include "arm_compute/graph/frontend/Types.h"
-
 #include "arm_compute/graph/Graph.h"
 #include "arm_compute/graph/GraphContext.h"
 #include "arm_compute/graph/GraphManager.h"
@@ -65,7 +64,7 @@
     void run();
 
     // Inherited overridden methods
-    void add_layer(ILayer &layer) override;
+    void         add_layer(ILayer &layer) override;
     Graph       &graph() override;
     const Graph &graph() const override;
 
diff --git a/arm_compute/graph/frontend/SubStream.h b/arm_compute/graph/frontend/SubStream.h
index 2283cfe..c54317c 100644
--- a/arm_compute/graph/frontend/SubStream.h
+++ b/arm_compute/graph/frontend/SubStream.h
@@ -54,7 +54,7 @@
     SubStream(IStream &s);
 
     // Inherited overridden methods
-    void add_layer(ILayer &layer) override;
+    void         add_layer(ILayer &layer) override;
     Graph       &graph() override;
     const Graph &graph() const override;
 
diff --git a/arm_compute/graph/frontend/Types.h b/arm_compute/graph/frontend/Types.h
index bc4fe7a..42b28b3 100644
--- a/arm_compute/graph/frontend/Types.h
+++ b/arm_compute/graph/frontend/Types.h
@@ -33,39 +33,40 @@
 namespace frontend
 {
 // Import types for graph
-using graph::DataType;
 using graph::DataLayout;
 using graph::DataLayoutDimension;
-using graph::TensorShape;
+using graph::DataType;
 using graph::PermutationVector;
+using graph::TensorShape;
 
 using graph::ActivationLayerInfo;
+using graph::ConvolutionMethod;
+using graph::DepthwiseConvolutionMethod;
+using graph::DimensionRoundingType;
 using graph::EltwiseOperation;
+using graph::FastMathHint;
 using graph::FullyConnectedLayerInfo;
+using graph::GraphConfig;
+using graph::InterpolationPolicy;
 using graph::NormalizationLayerInfo;
 using graph::NormType;
 using graph::PadStrideInfo;
 using graph::PoolingLayerInfo;
 using graph::PoolingType;
-using graph::Target;
-using graph::ConvolutionMethod;
-using graph::FastMathHint;
-using graph::DepthwiseConvolutionMethod;
-using graph::TensorDescriptor;
-using graph::DimensionRoundingType;
-using graph::GraphConfig;
-using graph::InterpolationPolicy;
 using graph::Size2D;
+using graph::Target;
+using graph::TensorDescriptor;
 
 /** Hints that can be passed to the stream to expose parameterization */
 struct StreamHints
 {
-    Target                     target_hint                       = { Target::UNSPECIFIED };                 /**< Target execution hint */
-    ConvolutionMethod          convolution_method_hint           = { ConvolutionMethod::Default };          /**< Convolution method hint */
-    DepthwiseConvolutionMethod depthwise_convolution_method_hint = { DepthwiseConvolutionMethod::Default }; /**< Depthwise Convolution method hint */
-    FastMathHint               fast_math_hint                    = { FastMathHint::Disabled };              /**< Fast math hint */
+    Target                     target_hint             = {Target::UNSPECIFIED};        /**< Target execution hint */
+    ConvolutionMethod          convolution_method_hint = {ConvolutionMethod::Default}; /**< Convolution method hint */
+    DepthwiseConvolutionMethod depthwise_convolution_method_hint = {
+        DepthwiseConvolutionMethod::Default};               /**< Depthwise Convolution method hint */
+    FastMathHint fast_math_hint = {FastMathHint::Disabled}; /**< Fast math hint */
 };
 } // namespace frontend
 } // namespace graph
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_STREAM_TYPES_H */
\ No newline at end of file
+#endif /* ARM_COMPUTE_GRAPH_STREAM_TYPES_H */
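
The StreamHints members re-aligned above are what each create_layer() consults through
s.hints(). They are overridden by streaming hint values into a stream before any layer
is added; a short sketch, assuming the stream operators declared in IStreamOperators.h:

#include "arm_compute/graph.h"

using namespace arm_compute::graph::frontend;

// Override the default hints before any layer is streamed in.
void configure_hints(Stream &graph)
{
    graph << Target::CL                                // target_hint
          << ConvolutionMethod::Direct                 // convolution_method_hint
          << DepthwiseConvolutionMethod::Optimized3x3  // depthwise_convolution_method_hint
          << FastMathHint::Enabled;                    // fast_math_hint
}
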
diff --git a/arm_compute/graph/mutators/DepthConcatSubTensorMutator.h b/arm_compute/graph/mutators/DepthConcatSubTensorMutator.h
index cb1f079..61d8854 100644
--- a/arm_compute/graph/mutators/DepthConcatSubTensorMutator.h
+++ b/arm_compute/graph/mutators/DepthConcatSubTensorMutator.h
@@ -40,7 +40,7 @@
     // Inherited methods overridden
     virtual void mutate(Graph &g) override;
     MutationType type() const override;
-    const char *name() override;
+    const char  *name() override;
 };
 } // namespace graph
 } // namespace arm_compute
diff --git a/arm_compute/graph/mutators/GroupedConvolutionMutator.h b/arm_compute/graph/mutators/GroupedConvolutionMutator.h
index e68c703..3ed8d78 100644
--- a/arm_compute/graph/mutators/GroupedConvolutionMutator.h
+++ b/arm_compute/graph/mutators/GroupedConvolutionMutator.h
@@ -40,7 +40,7 @@
     // Inherited methods overridden
     virtual void mutate(Graph &g) override;
     MutationType type() const override;
-    const char *name() override;
+    const char  *name() override;
 };
 } // namespace graph
 } // namespace arm_compute
diff --git a/arm_compute/graph/mutators/InPlaceOperationMutator.h b/arm_compute/graph/mutators/InPlaceOperationMutator.h
index 6248d86..86f62f1 100644
--- a/arm_compute/graph/mutators/InPlaceOperationMutator.h
+++ b/arm_compute/graph/mutators/InPlaceOperationMutator.h
@@ -37,7 +37,7 @@
     // Inherited methods overridden
     virtual void mutate(Graph &g) override;
     MutationType type() const override;
-    const char *name() override;
+    const char  *name() override;
 };
 } // namespace graph
 } // namespace arm_compute
diff --git a/arm_compute/graph/mutators/NodeExecutionMethodMutator.h b/arm_compute/graph/mutators/NodeExecutionMethodMutator.h
index 07c8ffa..505d4ab 100644
--- a/arm_compute/graph/mutators/NodeExecutionMethodMutator.h
+++ b/arm_compute/graph/mutators/NodeExecutionMethodMutator.h
@@ -42,7 +42,7 @@
     // Inherited methods overridden
     virtual void mutate(Graph &g) override;
     MutationType type() const override;
-    const char *name() override;
+    const char  *name() override;
 };
 } // namespace graph
 } // namespace arm_compute
diff --git a/arm_compute/graph/mutators/NodeFusionMutator.h b/arm_compute/graph/mutators/NodeFusionMutator.h
index f3e3eaa..9d2d44f 100644
--- a/arm_compute/graph/mutators/NodeFusionMutator.h
+++ b/arm_compute/graph/mutators/NodeFusionMutator.h
@@ -38,7 +38,7 @@
     // Inherited methods overridden
     virtual void mutate(Graph &g) override;
     MutationType type() const override;
-    const char *name() override;
+    const char  *name() override;
 };
 } // namespace graph
 } // namespace arm_compute
diff --git a/arm_compute/graph/mutators/SplitLayerSubTensorMutator.h b/arm_compute/graph/mutators/SplitLayerSubTensorMutator.h
index b14ef59..ab9746a 100644
--- a/arm_compute/graph/mutators/SplitLayerSubTensorMutator.h
+++ b/arm_compute/graph/mutators/SplitLayerSubTensorMutator.h
@@ -40,7 +40,7 @@
     // Inherited methods overridden
     virtual void mutate(Graph &g) override;
     MutationType type() const override;
-    const char *name() override;
+    const char  *name() override;
 };
 } // namespace graph
 } // namespace arm_compute
diff --git a/arm_compute/graph/mutators/SyntheticDataTypeMutator.h b/arm_compute/graph/mutators/SyntheticDataTypeMutator.h
index 2292e52..ce8af0a 100644
--- a/arm_compute/graph/mutators/SyntheticDataTypeMutator.h
+++ b/arm_compute/graph/mutators/SyntheticDataTypeMutator.h
@@ -40,7 +40,7 @@
     // Inherited methods overridden
     virtual void mutate(Graph &g) override;
     MutationType type() const override;
-    const char *name() override;
+    const char  *name() override;
 
 private:
     DataType _mutate_type;
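
The mutator headers above only re-align the name() declarations; the mutation machinery
itself is unchanged. As a reminder of how these classes are driven, a sketch of the
usual wiring (assumed here, not part of this patch): mutators are queued in a
PassManager and run over the graph before execution.

#include <memory>

#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/mutators/GraphMutators.h"
#include "arm_compute/graph/PassManager.h"

using namespace arm_compute::graph;

void run_mutators(Graph &g)
{
    PassManager pm;
    pm.append(std::make_unique<NodeFusionMutator>());
    pm.append(std::make_unique<InPlaceOperationMutator>());
    pm.run_all(g);
}
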
diff --git a/arm_compute/graph/nodes/ActivationLayerNode.h b/arm_compute/graph/nodes/ActivationLayerNode.h
index 4a98ee2..fe5f273 100644
--- a/arm_compute/graph/nodes/ActivationLayerNode.h
+++ b/arm_compute/graph/nodes/ActivationLayerNode.h
@@ -39,8 +39,7 @@
      * @param[in] info           Activation Layer information
      * @param[in] out_quant_info (Optional) Output quantization info
      */
-    ActivationLayerNode(ActivationLayerInfo info,
-                        QuantizationInfo    out_quant_info = QuantizationInfo());
+    ActivationLayerNode(ActivationLayerInfo info, QuantizationInfo out_quant_info = QuantizationInfo());
     /** Activation metadata accessor
      *
      * @return The activation info of the layer
@@ -51,7 +50,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 public:
     static constexpr NodeType node_type = NodeType::ActivationLayer;
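
The accept() declarations re-aligned here and throughout the node headers that follow
are the graph's visitor hook. Each definition is the usual one-line dispatch; a sketch
of the pattern (the real bodies live in the src/ translation units, which this patch
does not touch):

#include "arm_compute/graph/INodeVisitor.h"
#include "arm_compute/graph/nodes/ActivationLayerNode.h"

namespace arm_compute
{
namespace graph
{
void ActivationLayerNode::accept(INodeVisitor &v)
{
    v.visit(*this);
}
} // namespace graph
} // namespace arm_compute
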
diff --git a/arm_compute/graph/nodes/ArgMinMaxLayerNode.h b/arm_compute/graph/nodes/ArgMinMaxLayerNode.h
index 69191ad..65fbc36 100644
--- a/arm_compute/graph/nodes/ArgMinMaxLayerNode.h
+++ b/arm_compute/graph/nodes/ArgMinMaxLayerNode.h
@@ -65,7 +65,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 public:
     static constexpr NodeType node_type = NodeType::ArgMinMaxLayer;
diff --git a/arm_compute/graph/nodes/BatchNormalizationLayerNode.h b/arm_compute/graph/nodes/BatchNormalizationLayerNode.h
index e7f4049..8583ed8 100644
--- a/arm_compute/graph/nodes/BatchNormalizationLayerNode.h
+++ b/arm_compute/graph/nodes/BatchNormalizationLayerNode.h
@@ -60,7 +60,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 public:
     static constexpr NodeType node_type = NodeType::BatchNormalizationLayer;
diff --git a/arm_compute/graph/nodes/BoundingBoxTransformLayerNode.h b/arm_compute/graph/nodes/BoundingBoxTransformLayerNode.h
index 57175eb..96c2544 100644
--- a/arm_compute/graph/nodes/BoundingBoxTransformLayerNode.h
+++ b/arm_compute/graph/nodes/BoundingBoxTransformLayerNode.h
@@ -50,7 +50,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     BoundingBoxTransformInfo _bbox_info;
diff --git a/arm_compute/graph/nodes/ChannelShuffleLayerNode.h b/arm_compute/graph/nodes/ChannelShuffleLayerNode.h
index 0696fe5..d296a2d 100644
--- a/arm_compute/graph/nodes/ChannelShuffleLayerNode.h
+++ b/arm_compute/graph/nodes/ChannelShuffleLayerNode.h
@@ -49,7 +49,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     unsigned int _num_groups;
diff --git a/arm_compute/graph/nodes/ConcatenateLayerNode.h b/arm_compute/graph/nodes/ConcatenateLayerNode.h
index 8582403..13398b1 100644
--- a/arm_compute/graph/nodes/ConcatenateLayerNode.h
+++ b/arm_compute/graph/nodes/ConcatenateLayerNode.h
@@ -47,7 +47,8 @@
      *
      * @return Expected output descriptor
      */
-    static TensorDescriptor compute_output_descriptor(const std::vector<TensorDescriptor> &input_descriptors, DataLayoutDimension axis);
+    static TensorDescriptor compute_output_descriptor(const std::vector<TensorDescriptor> &input_descriptors,
+                                                      DataLayoutDimension                  axis);
     /** Enables or disables the depth concatenate node
      *
      * @warning This is used when concatenate is performed using sub-tensors, where this node is used as a placeholder.
@@ -78,7 +79,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     unsigned int                       _total_nodes;
diff --git a/arm_compute/graph/nodes/ConstNode.h b/arm_compute/graph/nodes/ConstNode.h
index b377c60..400b9b4 100644
--- a/arm_compute/graph/nodes/ConstNode.h
+++ b/arm_compute/graph/nodes/ConstNode.h
@@ -44,7 +44,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     TensorDescriptor _desc;
diff --git a/arm_compute/graph/nodes/ConvolutionLayerNode.h b/arm_compute/graph/nodes/ConvolutionLayerNode.h
index 99effa0..8a77b89 100644
--- a/arm_compute/graph/nodes/ConvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/ConvolutionLayerNode.h
@@ -111,7 +111,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 public:
     static constexpr NodeType node_type = NodeType::ConvolutionLayer;
diff --git a/arm_compute/graph/nodes/DeconvolutionLayerNode.h b/arm_compute/graph/nodes/DeconvolutionLayerNode.h
index e74adb1..553d059 100644
--- a/arm_compute/graph/nodes/DeconvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/DeconvolutionLayerNode.h
@@ -61,7 +61,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     descriptors::DeconvolutionLayerDescriptor descriptor;
diff --git a/arm_compute/graph/nodes/DepthToSpaceLayerNode.h b/arm_compute/graph/nodes/DepthToSpaceLayerNode.h
index 25e30e2..5fbcc67 100644
--- a/arm_compute/graph/nodes/DepthToSpaceLayerNode.h
+++ b/arm_compute/graph/nodes/DepthToSpaceLayerNode.h
@@ -56,7 +56,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     int _block_shape;
diff --git a/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h b/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
index 5df8698..441d68d 100644
--- a/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
@@ -101,7 +101,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 public:
     static constexpr NodeType node_type = NodeType::DepthwiseConvolutionLayer;
diff --git a/arm_compute/graph/nodes/DequantizationLayerNode.h b/arm_compute/graph/nodes/DequantizationLayerNode.h
index 4910938..1cce713 100644
--- a/arm_compute/graph/nodes/DequantizationLayerNode.h
+++ b/arm_compute/graph/nodes/DequantizationLayerNode.h
@@ -46,8 +46,8 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 };
 } // namespace graph
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_DEQUANTIZATION_NODE_H */
\ No newline at end of file
+#endif /* ARM_COMPUTE_GRAPH_DEQUANTIZATION_NODE_H */
diff --git a/arm_compute/graph/nodes/DetectionOutputLayerNode.h b/arm_compute/graph/nodes/DetectionOutputLayerNode.h
index b4b910c..c3e067e 100644
--- a/arm_compute/graph/nodes/DetectionOutputLayerNode.h
+++ b/arm_compute/graph/nodes/DetectionOutputLayerNode.h
@@ -51,13 +51,14 @@
      *
      * @return Output descriptor
      */
-    static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor, const DetectionOutputLayerInfo &info);
+    static TensorDescriptor compute_output_descriptor(const TensorDescriptor         &input_descriptor,
+                                                      const DetectionOutputLayerInfo &info);
 
     // Inherited overridden methods:
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     DetectionOutputLayerInfo _info;
diff --git a/arm_compute/graph/nodes/DetectionPostProcessLayerNode.h b/arm_compute/graph/nodes/DetectionPostProcessLayerNode.h
index 6ff78ae..a53aaf2 100644
--- a/arm_compute/graph/nodes/DetectionPostProcessLayerNode.h
+++ b/arm_compute/graph/nodes/DetectionPostProcessLayerNode.h
@@ -49,7 +49,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     DetectionPostProcessLayerInfo _info;
@@ -59,4 +59,4 @@
 };
 } // namespace graph
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_DETECTION_POST_PROCESS_LAYER_NODE_H */
\ No newline at end of file
+#endif /* ARM_COMPUTE_GRAPH_DETECTION_POST_PROCESS_LAYER_NODE_H */
diff --git a/arm_compute/graph/nodes/DummyNode.h b/arm_compute/graph/nodes/DummyNode.h
index 645f1b3..2263525 100644
--- a/arm_compute/graph/nodes/DummyNode.h
+++ b/arm_compute/graph/nodes/DummyNode.h
@@ -51,11 +51,11 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     TensorShape _shape;
 };
 } // namespace graph
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_GRAPH_DUMMY_NODE_H */
\ No newline at end of file
+#endif /* ARM_COMPUTE_GRAPH_DUMMY_NODE_H */
diff --git a/arm_compute/graph/nodes/EltwiseLayerNode.h b/arm_compute/graph/nodes/EltwiseLayerNode.h
index 7a6d8e8..2582982 100644
--- a/arm_compute/graph/nodes/EltwiseLayerNode.h
+++ b/arm_compute/graph/nodes/EltwiseLayerNode.h
@@ -79,7 +79,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
     static constexpr NodeType node_type = NodeType::EltwiseLayer;
 
@@ -112,7 +112,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
     static constexpr NodeType node_type = NodeType::UnaryEltwiseLayer;
 
diff --git a/arm_compute/graph/nodes/FlattenLayerNode.h b/arm_compute/graph/nodes/FlattenLayerNode.h
index 046114c..af10470 100644
--- a/arm_compute/graph/nodes/FlattenLayerNode.h
+++ b/arm_compute/graph/nodes/FlattenLayerNode.h
@@ -41,7 +41,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 };
 } // namespace graph
 } // namespace arm_compute
diff --git a/arm_compute/graph/nodes/FullyConnectedLayerNode.h b/arm_compute/graph/nodes/FullyConnectedLayerNode.h
index 9ade62b..3bcf386 100644
--- a/arm_compute/graph/nodes/FullyConnectedLayerNode.h
+++ b/arm_compute/graph/nodes/FullyConnectedLayerNode.h
@@ -73,7 +73,7 @@
      */
     static TensorDescriptor compute_weights_descriptor(const TensorDescriptor &input_descriptor,
                                                        unsigned int            num_outputs,
-                                                       FullyConnectedLayerInfo fc_info            = FullyConnectedLayerInfo(),
+                                                       FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo(),
                                                        const QuantizationInfo &weights_quant_info = QuantizationInfo());
     /** Computes fully connected layer output descriptor
      *
@@ -98,7 +98,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
     static constexpr NodeType node_type = NodeType::FullyConnectedLayer;
 
diff --git a/arm_compute/graph/nodes/FusedConvolutionBatchNormalizationNode.h b/arm_compute/graph/nodes/FusedConvolutionBatchNormalizationNode.h
index b0051b1..d891ea4 100644
--- a/arm_compute/graph/nodes/FusedConvolutionBatchNormalizationNode.h
+++ b/arm_compute/graph/nodes/FusedConvolutionBatchNormalizationNode.h
@@ -43,7 +43,8 @@
      * @param[in] fast_math_hint   (Optional) Fast math hint
      * @param[in] fused_activation (Optional) Fused activation layer. Disabled if not specified
      */
-    FusedConvolutionBatchNormalizationNode(float epsilon, PadStrideInfo info,
+    FusedConvolutionBatchNormalizationNode(float               epsilon,
+                                           PadStrideInfo       info,
                                            unsigned int        num_groups       = 1,
                                            ConvolutionMethod   method           = ConvolutionMethod::Default,
                                            FastMathHint        fast_math_hint   = FastMathHint::Disabled,
@@ -122,7 +123,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 public:
     static constexpr NodeType node_type = NodeType::FusedConvolutionBatchNormalizationLayer;
diff --git a/arm_compute/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.h b/arm_compute/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.h
index a01cb9d..a61b155 100644
--- a/arm_compute/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.h
+++ b/arm_compute/graph/nodes/FusedDepthwiseConvolutionBatchNormalizationNode.h
@@ -46,7 +46,7 @@
                                                     PadStrideInfo              info,
                                                     unsigned int               depth_multiplier,
                                                     DepthwiseConvolutionMethod method,
-                                                    ActivationLayerInfo        fused_activation = ActivationLayerInfo());
+                                                    ActivationLayerInfo fused_activation = ActivationLayerInfo());
 
     /** Sets the depthwise convolution layer method to use
      *
@@ -117,7 +117,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 public:
     static constexpr NodeType node_type = NodeType::FusedDepthwiseConvolutionBatchNormalizationLayer;
diff --git a/arm_compute/graph/nodes/GenerateProposalsLayerNode.h b/arm_compute/graph/nodes/GenerateProposalsLayerNode.h
index 6f8edc8..b5e4b97 100644
--- a/arm_compute/graph/nodes/GenerateProposalsLayerNode.h
+++ b/arm_compute/graph/nodes/GenerateProposalsLayerNode.h
@@ -50,7 +50,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     GenerateProposalsInfo _info;
diff --git a/arm_compute/graph/nodes/InputNode.h b/arm_compute/graph/nodes/InputNode.h
index 07091af..0983d25 100644
--- a/arm_compute/graph/nodes/InputNode.h
+++ b/arm_compute/graph/nodes/InputNode.h
@@ -44,7 +44,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     TensorDescriptor _desc;
diff --git a/arm_compute/graph/nodes/L2NormalizeLayerNode.h b/arm_compute/graph/nodes/L2NormalizeLayerNode.h
index 8edc5b0..ed11412 100644
--- a/arm_compute/graph/nodes/L2NormalizeLayerNode.h
+++ b/arm_compute/graph/nodes/L2NormalizeLayerNode.h
@@ -68,7 +68,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     int   _axis;
diff --git a/arm_compute/graph/nodes/Nodes.h b/arm_compute/graph/nodes/Nodes.h
index ae9f177..d4ad32b 100644
--- a/arm_compute/graph/nodes/Nodes.h
+++ b/arm_compute/graph/nodes/Nodes.h
@@ -50,18 +50,18 @@
 #include "arm_compute/graph/nodes/NormalizationLayerNode.h"
 #include "arm_compute/graph/nodes/NormalizePlanarYUVLayerNode.h"
 #include "arm_compute/graph/nodes/OutputNode.h"
-#include "arm_compute/graph/nodes/PReluLayerNode.h"
 #include "arm_compute/graph/nodes/PadLayerNode.h"
 #include "arm_compute/graph/nodes/PermuteLayerNode.h"
 #include "arm_compute/graph/nodes/PoolingLayerNode.h"
+#include "arm_compute/graph/nodes/PReluLayerNode.h"
 #include "arm_compute/graph/nodes/PrintLayerNode.h"
 #include "arm_compute/graph/nodes/PriorBoxLayerNode.h"
 #include "arm_compute/graph/nodes/QuantizationLayerNode.h"
-#include "arm_compute/graph/nodes/ROIAlignLayerNode.h"
 #include "arm_compute/graph/nodes/ReductionLayerNode.h"
 #include "arm_compute/graph/nodes/ReorgLayerNode.h"
 #include "arm_compute/graph/nodes/ReshapeLayerNode.h"
 #include "arm_compute/graph/nodes/ResizeLayerNode.h"
+#include "arm_compute/graph/nodes/ROIAlignLayerNode.h"
 #include "arm_compute/graph/nodes/SliceLayerNode.h"
 #include "arm_compute/graph/nodes/SoftmaxLayerNode.h"
 #include "arm_compute/graph/nodes/SplitLayerNode.h"
diff --git a/arm_compute/graph/nodes/NormalizationLayerNode.h b/arm_compute/graph/nodes/NormalizationLayerNode.h
index 503b859..86f2fb9 100644
--- a/arm_compute/graph/nodes/NormalizationLayerNode.h
+++ b/arm_compute/graph/nodes/NormalizationLayerNode.h
@@ -49,7 +49,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     NormalizationLayerInfo _info;
diff --git a/arm_compute/graph/nodes/NormalizePlanarYUVLayerNode.h b/arm_compute/graph/nodes/NormalizePlanarYUVLayerNode.h
index 4d84c20..158acc4 100644
--- a/arm_compute/graph/nodes/NormalizePlanarYUVLayerNode.h
+++ b/arm_compute/graph/nodes/NormalizePlanarYUVLayerNode.h
@@ -41,7 +41,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 };
 } // namespace graph
 } // namespace arm_compute
diff --git a/arm_compute/graph/nodes/OutputNode.h b/arm_compute/graph/nodes/OutputNode.h
index c91bc6b..75484ab 100644
--- a/arm_compute/graph/nodes/OutputNode.h
+++ b/arm_compute/graph/nodes/OutputNode.h
@@ -41,7 +41,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 };
 } // namespace graph
 } // namespace arm_compute
diff --git a/arm_compute/graph/nodes/PReluLayerNode.h b/arm_compute/graph/nodes/PReluLayerNode.h
index b8e6c1a..532fdcc 100644
--- a/arm_compute/graph/nodes/PReluLayerNode.h
+++ b/arm_compute/graph/nodes/PReluLayerNode.h
@@ -41,7 +41,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 };
 } // namespace graph
 } // namespace arm_compute
diff --git a/arm_compute/graph/nodes/PadLayerNode.h b/arm_compute/graph/nodes/PadLayerNode.h
index d6ff355..dcb5ea5 100644
--- a/arm_compute/graph/nodes/PadLayerNode.h
+++ b/arm_compute/graph/nodes/PadLayerNode.h
@@ -56,7 +56,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 public:
     static constexpr NodeType node_type = NodeType::PadLayer;
diff --git a/arm_compute/graph/nodes/PermuteLayerNode.h b/arm_compute/graph/nodes/PermuteLayerNode.h
index 0b2380b..62654e7 100644
--- a/arm_compute/graph/nodes/PermuteLayerNode.h
+++ b/arm_compute/graph/nodes/PermuteLayerNode.h
@@ -51,7 +51,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     PermutationVector _perm;
diff --git a/arm_compute/graph/nodes/PoolingLayerNode.h b/arm_compute/graph/nodes/PoolingLayerNode.h
index b336bb9..c81f3f9 100644
--- a/arm_compute/graph/nodes/PoolingLayerNode.h
+++ b/arm_compute/graph/nodes/PoolingLayerNode.h
@@ -57,7 +57,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     PoolingLayerInfo _info;
diff --git a/arm_compute/graph/nodes/PrintLayerNode.h b/arm_compute/graph/nodes/PrintLayerNode.h
index b57ac1f..e7accc8 100644
--- a/arm_compute/graph/nodes/PrintLayerNode.h
+++ b/arm_compute/graph/nodes/PrintLayerNode.h
@@ -43,7 +43,9 @@
      * @param[in] format_info (Optional) Format info.
      * @param[in] transform   (Optional) Input transform function.
      */
-    PrintLayerNode(std::ostream &stream, const IOFormatInfo &format_info = IOFormatInfo(), const std::function<ITensor *(ITensor *)> transform = nullptr);
+    PrintLayerNode(std::ostream                             &stream,
+                   const IOFormatInfo                       &format_info = IOFormatInfo(),
+                   const std::function<ITensor *(ITensor *)> transform   = nullptr);
 
     /** Stream metadata accessor
      *
@@ -67,7 +69,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     std::ostream                             &_stream;
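
Note: long signatures such as the PrintLayerNode constructor above are rewrapped
one parameter per line, with parameter names aligned in a column and the * or &
kept attached to the name rather than the type. A sketch of options consistent
with this layout, assuming clang-format 14.0.6 option names (the column limit is
an assumption, not confirmed by this change):

    # Hypothetical excerpt; not the delivered configuration
    BinPackParameters: false
    AlignAfterOpenBracket: Align
    PointerAlignment: Right
    ColumnLimit: 120
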
diff --git a/arm_compute/graph/nodes/PriorBoxLayerNode.h b/arm_compute/graph/nodes/PriorBoxLayerNode.h
index c7eadd1..db36bfb 100644
--- a/arm_compute/graph/nodes/PriorBoxLayerNode.h
+++ b/arm_compute/graph/nodes/PriorBoxLayerNode.h
@@ -51,13 +51,14 @@
      *
      * @return Output descriptor
      */
-    static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor, const PriorBoxLayerInfo &info);
+    static TensorDescriptor compute_output_descriptor(const TensorDescriptor  &input_descriptor,
+                                                      const PriorBoxLayerInfo &info);
 
     // Inherited overridden methods:
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     PriorBoxLayerInfo _info;
diff --git a/arm_compute/graph/nodes/QuantizationLayerNode.h b/arm_compute/graph/nodes/QuantizationLayerNode.h
index e5d81af..b8e4c7d 100644
--- a/arm_compute/graph/nodes/QuantizationLayerNode.h
+++ b/arm_compute/graph/nodes/QuantizationLayerNode.h
@@ -51,7 +51,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
     static constexpr NodeType node_type = NodeType::QuantizationLayer;
 
diff --git a/arm_compute/graph/nodes/ROIAlignLayerNode.h b/arm_compute/graph/nodes/ROIAlignLayerNode.h
index 5abd065..70309a5 100644
--- a/arm_compute/graph/nodes/ROIAlignLayerNode.h
+++ b/arm_compute/graph/nodes/ROIAlignLayerNode.h
@@ -56,7 +56,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     ROIPoolingLayerInfo _pool_info;
diff --git a/arm_compute/graph/nodes/ReductionLayerNode.h b/arm_compute/graph/nodes/ReductionLayerNode.h
index b8d2959..ff99466 100644
--- a/arm_compute/graph/nodes/ReductionLayerNode.h
+++ b/arm_compute/graph/nodes/ReductionLayerNode.h
@@ -56,7 +56,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     ReductionOperation _op;
diff --git a/arm_compute/graph/nodes/ReorgLayerNode.h b/arm_compute/graph/nodes/ReorgLayerNode.h
index 986692e..a3bbcdb 100644
--- a/arm_compute/graph/nodes/ReorgLayerNode.h
+++ b/arm_compute/graph/nodes/ReorgLayerNode.h
@@ -57,7 +57,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     int _stride;
diff --git a/arm_compute/graph/nodes/ReshapeLayerNode.h b/arm_compute/graph/nodes/ReshapeLayerNode.h
index 727d253..992275c 100644
--- a/arm_compute/graph/nodes/ReshapeLayerNode.h
+++ b/arm_compute/graph/nodes/ReshapeLayerNode.h
@@ -44,7 +44,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     TensorShape _shape;
diff --git a/arm_compute/graph/nodes/ResizeLayerNode.h b/arm_compute/graph/nodes/ResizeLayerNode.h
index 79f8889..480d6e5 100644
--- a/arm_compute/graph/nodes/ResizeLayerNode.h
+++ b/arm_compute/graph/nodes/ResizeLayerNode.h
@@ -51,7 +51,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     InterpolationPolicy _policy;
diff --git a/arm_compute/graph/nodes/SliceLayerNode.h b/arm_compute/graph/nodes/SliceLayerNode.h
index 08d3794..63f266b 100644
--- a/arm_compute/graph/nodes/SliceLayerNode.h
+++ b/arm_compute/graph/nodes/SliceLayerNode.h
@@ -51,7 +51,8 @@
      * @return  Output descriptor
      */
     static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor,
-                                                      const Coordinates &starts, const Coordinates &ends);
+                                                      const Coordinates      &starts,
+                                                      const Coordinates      &ends);
     /** Start coordinates accessor
      *
      * @return Start coordinates of the dimensions
@@ -67,7 +68,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     Coordinates _starts;
diff --git a/arm_compute/graph/nodes/SoftmaxLayerNode.h b/arm_compute/graph/nodes/SoftmaxLayerNode.h
index 0868c6f..2cb1ac2 100644
--- a/arm_compute/graph/nodes/SoftmaxLayerNode.h
+++ b/arm_compute/graph/nodes/SoftmaxLayerNode.h
@@ -49,7 +49,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 public:
     static constexpr NodeType node_type = NodeType::SoftmaxLayer;
diff --git a/arm_compute/graph/nodes/SplitLayerNode.h b/arm_compute/graph/nodes/SplitLayerNode.h
index 13cccdd..5e6df53 100644
--- a/arm_compute/graph/nodes/SplitLayerNode.h
+++ b/arm_compute/graph/nodes/SplitLayerNode.h
@@ -55,7 +55,9 @@
      * @return  A pair with the descriptor of the split and the starting coordinates
      */
     std::pair<TensorDescriptor, Coordinates> compute_output_descriptor(const TensorDescriptor &input_descriptor,
-                                                                       unsigned int num_splits, int axis, unsigned int idx);
+                                                                       unsigned int            num_splits,
+                                                                       int                     axis,
+                                                                       unsigned int            idx);
     /** Number of splits accessor
      *
      * @return Number of splits
@@ -72,7 +74,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     unsigned int     _num_splits;
diff --git a/arm_compute/graph/nodes/StackLayerNode.h b/arm_compute/graph/nodes/StackLayerNode.h
index 2990895..9f0767c 100644
--- a/arm_compute/graph/nodes/StackLayerNode.h
+++ b/arm_compute/graph/nodes/StackLayerNode.h
@@ -58,7 +58,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     unsigned int _total_nodes;
diff --git a/arm_compute/graph/nodes/StridedSliceLayerNode.h b/arm_compute/graph/nodes/StridedSliceLayerNode.h
index 6039f31..f521feb 100644
--- a/arm_compute/graph/nodes/StridedSliceLayerNode.h
+++ b/arm_compute/graph/nodes/StridedSliceLayerNode.h
@@ -84,7 +84,7 @@
     NodeType         type() const override;
     bool             forward_descriptors() override;
     TensorDescriptor configure_output(size_t idx) const override;
-    void accept(INodeVisitor &v) override;
+    void             accept(INodeVisitor &v) override;
 
 private:
     Coordinates           _starts;
diff --git a/arm_compute/graph/printers/DotGraphPrinter.h b/arm_compute/graph/printers/DotGraphPrinter.h
index 564aecf..6638033 100644
--- a/arm_compute/graph/printers/DotGraphPrinter.h
+++ b/arm_compute/graph/printers/DotGraphPrinter.h
@@ -25,7 +25,6 @@
 #define ACL_ARM_COMPUTE_GRAPH_PRINTERS_DOTGRAPHPRINTER_H
 
 #include "arm_compute/graph/IGraphPrinter.h"
-
 #include "arm_compute/graph/INodeVisitor.h"
 
 #include <string>
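
Note: in DotGraphPrinter.h the blank line separating the two project headers is
dropped, so they form a single block and sort together, while <string> keeps its
own block. This is consistent with regrouping include blocks by category, e.g.,
assuming clang-format 14.0.6 option names:

    # Hypothetical excerpt; the actual IncludeCategories are not part of this delivery
    IncludeBlocks: Regroup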