Fuse pad layer with subsequent convolution layer

Fusion occurs only if:
  - the padding applies only to the height and/or width dimensions
  - the padding pixel value is 0
  - the pad node's output has no accessor
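
For illustration, a sketch of the merge under hypothetical values: a pad
node that adds 1 pixel to each side of the width and height dimensions,
followed by a convolution configured with PadStrideInfo(1, 1, 0, 0), is
folded into a single convolution configured with

    PadStrideInfo(1 /* stride_x */, 1 /* stride_y */,
                  0 + 1 /* pad_left */, 0 + 1 /* pad_right */,
                  0 + 1 /* pad_top */, 0 + 1 /* pad_bottom */,
                  DimensionRoundingType::FLOOR /* rounding, unchanged */);

and the pad node is removed from the graph.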

Resolves: COMPMID-4702

Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Change-Id: I0755d5fb0bd3a55d9f10b32ce9da44e7c5a25279
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6189
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/arm_compute/graph/Utils.h b/arm_compute/graph/Utils.h
index 43a8eca..a3d9012 100644
--- a/arm_compute/graph/Utils.h
+++ b/arm_compute/graph/Utils.h
@@ -132,6 +132,13 @@
  * @return A list with the driving node of a given node
  */
 std::vector<NodeIdxPair> get_driving_nodes(const INode &node);
+/** Get the list of driver nodes of a given node
+ *
+ * @param[in] node Node to find the driver nodes of
+ *
+ * @return A list with the driver nodes of a given node
+ */
+std::vector<NodeIdxPair> get_driver_nodes(const INode &node);
 /** Configures tensor
  *
  * @param[in, out] tensor Tensor to configure
diff --git a/arm_compute/graph/nodes/ConvolutionLayerNode.h b/arm_compute/graph/nodes/ConvolutionLayerNode.h
index e4151c0..99effa0 100644
--- a/arm_compute/graph/nodes/ConvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/ConvolutionLayerNode.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2019, 2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -90,6 +90,11 @@
      * @param[in] fused_activation Fused activation to set
      */
     void set_fused_activation(ActivationLayerInfo fused_activation);
+    /** Sets convolution info
+     *
+     * @param[in] info Convolution info to set
+     */
+    void set_convolution_info(PadStrideInfo info);
     /** Computes convolution output descriptor
      *
      * @param[in] input_descriptor   Input descriptor
diff --git a/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h b/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
index 59847a9..5df8698 100644
--- a/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2019, 2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -78,6 +78,11 @@
      * @param[in] fused_activation Fused activation to set
      */
     void set_fused_activation(ActivationLayerInfo fused_activation);
+    /** Sets convolution info
+     *
+     * @param[in] info Convolution info to set
+     */
+    void set_convolution_info(PadStrideInfo info);
     /** Computes depthwise convolution output descriptor
      *
      * @param[in] input_descriptor   Input descriptor
diff --git a/arm_compute/graph/nodes/PadLayerNode.h b/arm_compute/graph/nodes/PadLayerNode.h
index 8fcbc52..d6ff355 100644
--- a/arm_compute/graph/nodes/PadLayerNode.h
+++ b/arm_compute/graph/nodes/PadLayerNode.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -58,6 +58,9 @@
     TensorDescriptor configure_output(size_t idx) const override;
     void accept(INodeVisitor &v) override;
 
+public:
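+    // Static node type, used by graph mutators (e.g. the pad-convolution fusion) to identify pad layer nodes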
+    static constexpr NodeType node_type = NodeType::PadLayer;
+
 private:
     PaddingList _padding;
     PixelValue  _pad_value;
diff --git a/src/graph/Utils.cpp b/src/graph/Utils.cpp
index 7309737..7db06b9 100644
--- a/src/graph/Utils.cpp
+++ b/src/graph/Utils.cpp
@@ -194,6 +194,26 @@
     return driving_nodes;
 }
 
+std::vector<NodeIdxPair> get_driver_nodes(const INode &node)
+{
+    std::vector<NodeIdxPair> driver_nodes;
+
+    const Graph *g = node.graph();
+    ARM_COMPUTE_ERROR_ON(g == nullptr);
+
+    for(auto &input_edge_id : node.input_edges())
+    {
+        auto input_edge = g->edge(input_edge_id);
+        if(input_edge != nullptr)
+        {
+            ARM_COMPUTE_ERROR_ON(input_edge->producer() == nullptr);
+            driver_nodes.push_back({ input_edge->producer_id(), input_edge->producer_idx() });
+        }
+    }
+
+    return driver_nodes;
+}
+
 void configure_tensor(Tensor *tensor)
 {
     if(tensor != nullptr && tensor->handle() == nullptr)
diff --git a/src/graph/mutators/NodeFusionMutator.cpp b/src/graph/mutators/NodeFusionMutator.cpp
index 5a696f8..b530fb0 100644
--- a/src/graph/mutators/NodeFusionMutator.cpp
+++ b/src/graph/mutators/NodeFusionMutator.cpp
@@ -265,6 +265,74 @@
     }
 }
 
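+// Returns true if the requested padding affects only the height/width dimensions of the given
+// layout; the width/height paddings are returned through pad_w and pad_h (zero padding if the
+// corresponding dimension is not in the padding list).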
+bool check_padding_info(const DataLayout &layout, const PaddingList &padding_list, PaddingInfo &pad_w, PaddingInfo &pad_h)
+{
+    if(layout == DataLayout::NCHW || layout == DataLayout::NHWC)
+    {
+        const PaddingInfo zero_padding(0, 0);
+
+        const unsigned int height_index = get_dimension_idx(layout, DataLayoutDimension::HEIGHT);
+        const unsigned int width_index  = get_dimension_idx(layout, DataLayoutDimension::WIDTH);
+
+        pad_w = width_index < padding_list.size() ? padding_list[width_index] : zero_padding;
+        pad_h = height_index < padding_list.size() ? padding_list[height_index] : zero_padding;
+
+        for(unsigned int i = 0; i < padding_list.size(); i++)
+        {
+            if(i != height_index && i != width_index && padding_list[i] != zero_padding)
+            {
+                // Non-zero padding on a dimension other than height or width: cannot fuse
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    return false;
+}
+
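+// Fuses a pad node into the convolution-type node N that consumes its output, by folding the
+// height/width padding into the convolution's PadStrideInfo and removing the pad node from the graph.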
+template <typename N>
+void fuse_pad_with_convolution(Graph &g, const Edge *output_edge)
+{
+    auto *pad_node  = arm_compute::utils::cast::polymorphic_downcast<PadLayerNode *>(output_edge->producer());
+    auto *conv_node = arm_compute::utils::cast::polymorphic_downcast<N *>(output_edge->consumer());
+
+    const Edge *input_edge = pad_node->input_edge(0);
+    if(input_edge != nullptr && input_edge->tensor() != nullptr && pad_node->output(0)->accessor() == nullptr
+       && pad_node->pad_value().get<float>() == 0.0)
+    {
+        const DataLayout  layout       = input_edge->tensor()->desc().layout;
+        const PaddingList padding_list = pad_node->padding();
+        PaddingInfo       pad_w, pad_h;
+
+        if(check_padding_info(layout, padding_list, pad_w, pad_h))
+        {
+            // Add paddings to the convolution node
+            const PadStrideInfo conv_info = conv_node->convolution_info();
+            const PadStrideInfo new_conv_info(
+                conv_info.stride().first,
+                conv_info.stride().second,
+                conv_info.pad_left() + pad_w.first,
+                conv_info.pad_right() + pad_w.second,
+                conv_info.pad_top() + pad_h.first,
+                conv_info.pad_bottom() + pad_h.second,
+                conv_info.round());
+            conv_node->set_convolution_info(new_conv_info);
+
+            // Collect the pad node's drivers, then remove the pad node from the graph
+            std::vector<NodeIdxPair> pad_driver_nodes = get_driver_nodes(*pad_node);
+            g.remove_node(pad_node->id());
+
+            // Reconnect the pad node's drivers to the fused convolution node
+            for(auto &driver_node : pad_driver_nodes)
+            {
+                g.add_connection(driver_node.node_id, driver_node.index, conv_node->id(), 0);
+            }
+        }
+    }
+}
+
 template <typename N1, typename N2, typename F, typename... Args>
 void fuse_layer(Graph &g, std::function<bool(INode &)> const &prec, const F fuse_fcn, Args &&... optional_arguments)
 {
@@ -333,6 +401,8 @@
     };
 
     // Fusion mutations
+    detail::fuse_layer<PadLayerNode, ConvolutionLayerNode>(g, empty_prec, detail::fuse_pad_with_convolution<ConvolutionLayerNode>);
+    detail::fuse_layer<PadLayerNode, DepthwiseConvolutionLayerNode>(g, empty_prec, detail::fuse_pad_with_convolution<DepthwiseConvolutionLayerNode>);
     detail::fuse_layer<BatchNormalizationLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<BatchNormalizationLayerNode>, supported_fused_activations);
     detail::fuse_layer<ConvolutionLayerNode, ActivationLayerNode>(g, empty_prec, detail::fuse_node_with_activation<ConvolutionLayerNode>, supported_fused_activations);
     detail::fuse_layer<DepthwiseConvolutionLayerNode, ActivationLayerNode>(g, qs8_prec, detail::fuse_node_with_activation<DepthwiseConvolutionLayerNode>, supported_fused_activations);
diff --git a/src/graph/nodes/ConvolutionLayerNode.cpp b/src/graph/nodes/ConvolutionLayerNode.cpp
index a982570..ee9dde9 100644
--- a/src/graph/nodes/ConvolutionLayerNode.cpp
+++ b/src/graph/nodes/ConvolutionLayerNode.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2019, 2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -83,6 +83,11 @@
     _fused_activation = fused_activation;
 }
 
+void ConvolutionLayerNode::set_convolution_info(PadStrideInfo info)
+{
+    _info = info;
+}
+
 TensorDescriptor ConvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
                                                                  const TensorDescriptor &weights_descriptor,
                                                                  const PadStrideInfo    &info)
diff --git a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
index 42fb0fd..7de2016 100644
--- a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
+++ b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2019, 2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -70,6 +70,11 @@
     _fused_activation = fused_activation;
 }
 
+void DepthwiseConvolutionLayerNode::set_convolution_info(PadStrideInfo info)
+{
+    _info = info;
+}
+
 TensorDescriptor DepthwiseConvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
                                                                           const TensorDescriptor &weights_descriptor,
                                                                           const PadStrideInfo    &info,