COMPMID-1505: Add native grouping support at graph level

Change-Id: Iedc91b0aee743b59af5140c8acb8124548da3163
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/144362
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
diff --git a/arm_compute/graph/INode.h b/arm_compute/graph/INode.h
index f8101d7..4219150 100644
--- a/arm_compute/graph/INode.h
+++ b/arm_compute/graph/INode.h
@@ -224,6 +224,11 @@
      * @return Number of outputs
      */
     size_t num_outputs() const;
+    /** Returns common node parameters
+     *
+     * @return Common node parameters
+     */
+    NodeParams common_node_params() const;
     /** Returns requested target for this node
      *
      * @return Requested execution target
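The new common_node_params() accessor lets mutation passes read a node's shared attributes (name, requested target) and reuse them on any replacement nodes they create. A minimal sketch of that pattern, assuming NodeParams carries the name/target fields and that INode exposes set_common_node_parameters() as in the rest of the graph API:

    // Sketch: copy the original node's common parameters onto a replacement node so
    // its assigned name and requested target survive the mutation.
    void copy_common_params(arm_compute::graph::Graph &g,
                            const arm_compute::graph::INode &old_node,
                            arm_compute::graph::NodeID       new_node_id)
    {
        const arm_compute::graph::NodeParams params = old_node.common_node_params();
        g.node(new_node_id)->set_common_node_parameters(params);
    }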
diff --git a/arm_compute/graph/PassManager.h b/arm_compute/graph/PassManager.h
index 9f32a45..27f7794 100644
--- a/arm_compute/graph/PassManager.h
+++ b/arm_compute/graph/PassManager.h
@@ -74,13 +74,13 @@
     void clear();
     /** Runs all the mutation passes on a given graph
      *
-     * @param[in] g Graph to run the mutations on
+     * @param[in, out] g Graph to run the mutations on
      */
     void run_all(Graph &g);
     /** Runs a specific mutation pass on a given graph
      *
-     * @param[in] g     Graph to run the mutation on
-     * @param[in] index Index of the mutation to execute
+     * @param[in, out] g     Graph to run the mutation on
+     * @param[in]      index Index of the mutation to execute
      */
     void run(Graph &g, size_t index);
 
diff --git a/arm_compute/graph/Utils.h b/arm_compute/graph/Utils.h
index 3604bad..1a0509b 100644
--- a/arm_compute/graph/Utils.h
+++ b/arm_compute/graph/Utils.h
@@ -115,6 +115,18 @@
  * @return Idx of given dimension
  */
 size_t get_dimension_idx(const TensorDescriptor &descriptor, const DataLayoutDimension data_layout_dimension);
+/** Get the list of driving nodes of a given node
+ *
+ * @param[in] node Node to find the driving nodes of
+ *
+ * @return A list with the driving nodes of the given node
+ */
+std::vector<NodeIdxPair> get_driving_nodes(const INode &node);
+/** Configures a tensor
+ *
+ * @param[in, out] tensor Tensor to configure
+ */
+void configure_tensor(Tensor *tensor);
 } // namespace graph
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_GRAPH_UTILS_H__ */
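For reference, get_driving_nodes() can be written purely in terms of the edge accessors already used by the graph algorithms. A minimal sketch (not necessarily the library's exact implementation), assuming Edge exposes consumer_id()/consumer_idx() and NodeIdxPair pairs a node id with an input index:

    std::vector<NodeIdxPair> get_driving_nodes(const INode &node)
    {
        std::vector<NodeIdxPair> driving_nodes;

        const Graph *g = node.graph();
        ARM_COMPUTE_ERROR_ON(g == nullptr);

        // Each output edge identifies a consumer node and the input index it drives
        for(const auto &output_edge_id : node.output_edges())
        {
            const Edge *output_edge = g->edge(output_edge_id);
            if(output_edge != nullptr)
            {
                driving_nodes.push_back({ output_edge->consumer_id(), output_edge->consumer_idx() });
            }
        }

        return driving_nodes;
    }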
diff --git a/arm_compute/graph/algorithms/Algorithms.h b/arm_compute/graph/algorithms/Algorithms.h
index f89856f..7af2455 100644
--- a/arm_compute/graph/algorithms/Algorithms.h
+++ b/arm_compute/graph/algorithms/Algorithms.h
@@ -24,6 +24,6 @@
 #ifndef __ARM_COMPUTE_GRAPH_ALGORITHMS_H__
 #define __ARM_COMPUTE_GRAPH_ALGORITHMS_H__
 
-#include "arm_compute/graph/algorithms/BFS.h"
+#include "arm_compute/graph/algorithms/TopologicalSort.h"
 
 #endif /* __ARM_COMPUTE_GRAPH_ALGORITHMS_H__ */
diff --git a/arm_compute/graph/algorithms/BFS.h b/arm_compute/graph/algorithms/BFS.h
deleted file mode 100644
index 97292d7..0000000
--- a/arm_compute/graph/algorithms/BFS.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_GRAPH_ALGORITHM_BFS_H__
-#define __ARM_COMPUTE_GRAPH_ALGORITHM_BFS_H__
-
-#include "arm_compute/graph/Graph.h"
-
-#include <list>
-#include <vector>
-
-namespace arm_compute
-{
-namespace graph
-{
-namespace detail
-{
-/** Checks if all the input dependencies of a node have been visited
- *
- * @param[in] node    Node to check
- * @param[in] visited Vector that contains the visited information
- *
- * @return True if all inputs dependencies have been visited else false
- */
-inline bool all_inputs_are_visited(const INode *node, const std::vector<bool> &visited)
-{
-    ARM_COMPUTE_ERROR_ON(node == nullptr);
-    const Graph *graph = node->graph();
-    ARM_COMPUTE_ERROR_ON(graph == nullptr);
-
-    bool are_all_visited = true;
-    for(const auto &input_edge_id : node->input_edges())
-    {
-        if(input_edge_id != EmptyNodeID)
-        {
-            const Edge *input_edge = graph->edge(input_edge_id);
-            ARM_COMPUTE_ERROR_ON(input_edge == nullptr);
-            ARM_COMPUTE_ERROR_ON(input_edge->producer() == nullptr);
-            if(!visited[input_edge->producer_id()])
-            {
-                are_all_visited = false;
-                break;
-            }
-        }
-    }
-
-    return are_all_visited;
-}
-} // namespace detail
-
-/** Breadth first search traversal
- *
- * @param g Graph to traverse
- *
- * @return A vector with the node id traversal order
- */
-inline std::vector<NodeID> bfs(Graph &g)
-{
-    std::vector<NodeID> bfs_order_vector;
-
-    // Created visited vector
-    std::vector<bool> visited(g.nodes().size(), false);
-
-    // Create BFS queue
-    std::list<NodeID> queue;
-
-    // Push inputs and mark as visited
-    for(auto &input : g.nodes(NodeType::Input))
-    {
-        if(input != EmptyNodeID)
-        {
-            visited[input] = true;
-            queue.push_back(input);
-        }
-    }
-
-    // Iterate over vector and edges
-    while(!queue.empty())
-    {
-        // Dequeue a node from queue and process
-        NodeID n = queue.front();
-        bfs_order_vector.push_back(n);
-        queue.pop_front();
-
-        const INode *node = g.node(n);
-        ARM_COMPUTE_ERROR_ON(node == nullptr);
-        for(const auto &eid : node->output_edges())
-        {
-            const Edge *e = g.edge(eid);
-            ARM_COMPUTE_ERROR_ON(e == nullptr);
-            if(!visited[e->consumer_id()] && detail::all_inputs_are_visited(e->consumer(), visited))
-            {
-                visited[e->consumer_id()] = true;
-                queue.push_back(e->consumer_id());
-            }
-        }
-    }
-
-    return bfs_order_vector;
-}
-} // namespace graph
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_GRAPH_ALGORITHM_BFS_H__ */
diff --git a/arm_compute/graph/algorithms/TopologicalSort.h b/arm_compute/graph/algorithms/TopologicalSort.h
new file mode 100644
index 0000000..6684844
--- /dev/null
+++ b/arm_compute/graph/algorithms/TopologicalSort.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_ALGORITHM_TOPOLOGICAL_SORT_H__
+#define __ARM_COMPUTE_GRAPH_ALGORITHM_TOPOLOGICAL_SORT_H__
+
+#include "arm_compute/graph/Types.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+namespace graph
+{
+// Forward declarations
+class Graph;
+
+/** Breadth first search traversal
+ *
+ * @param g Graph to traverse
+ *
+ * @return A vector with the node id traversal order
+ */
+std::vector<NodeID> bfs(Graph &g);
+/** Depth first search traversal
+ *
+ * @param g Graph to traverse
+ *
+ * @return A vector with the node id traversal order
+ */
+std::vector<NodeID> dfs(Graph &g);
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_ALGORITHM_TOPOLOGICAL_SORT_H__ */
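dfs() is only declared here; a sketch of what a stack-based ordering could look like, mirroring the removed bfs() above and assuming the all-inputs-visited helper remains available internally (requires <stack>):

    std::vector<NodeID> dfs(Graph &g)
    {
        std::vector<NodeID> dfs_order_vector;

        // Visited flags, indexed by NodeID
        std::vector<bool> visited(g.nodes().size(), false);

        // DFS stack seeded with the graph inputs
        std::stack<NodeID> stack;
        for(auto &input : g.nodes(NodeType::Input))
        {
            if(input != EmptyNodeID)
            {
                visited[input] = true;
                stack.push(input);
            }
        }

        while(!stack.empty())
        {
            const NodeID n = stack.top();
            dfs_order_vector.push_back(n);
            stack.pop();

            const INode *node = g.node(n);
            ARM_COMPUTE_ERROR_ON(node == nullptr);
            for(const auto &eid : node->output_edges())
            {
                const Edge *e = g.edge(eid);
                ARM_COMPUTE_ERROR_ON(e == nullptr);
                // Only schedule a consumer once all of its producers have been ordered
                if(!visited[e->consumer_id()] && detail::all_inputs_are_visited(e->consumer(), visited))
                {
                    visited[e->consumer_id()] = true;
                    stack.push(e->consumer_id());
                }
            }
        }

        return dfs_order_vector;
    }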
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index 5739773..4a3f001 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -265,6 +265,7 @@
     }
 
     const PadStrideInfo     conv_info      = node.convolution_info();
+    const unsigned int      num_groups     = node.num_groups();
     const ConvolutionMethod conv_algorithm = node.convolution_method();
     const bool              fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
 
@@ -275,12 +276,14 @@
 
     if(conv_algorithm == ConvolutionMethod::Winograd)
     {
+        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
         std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                         std::string("WinogradConvolutionLayer"), mm,
                                         input, weights, biases, output, conv_info, ActivationLayerInfo(), fast_math);
     }
     else if(conv_algorithm == ConvolutionMethod::Direct)
     {
+        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
         std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                         std::string("DirectConvolutionLayer"),
                                         input, weights, biases, output, conv_info);
@@ -289,19 +292,22 @@
     {
         std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                         std::string("GEMMConvolutionLayer"), mm,
-                                        input, weights, biases, output, conv_info);
+                                        input, weights, biases, output, conv_info,
+                                        WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), num_groups);
     }
     else
     {
         std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                         std::string("GenericConvolutionLayer"), mm,
-                                        input, weights, biases, output, conv_info, WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), fast_math);
+                                        input, weights, biases, output, conv_info,
+                                        WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), fast_math, num_groups);
     }
 
     // Log info
     ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                                << " Target " << TargetInfo::TargetType
                                << " Data Type: " << input->info()->data_type()
+                               << " Groups: " << num_groups
                                << " Input QuantInfo: " << input->info()->quantization_info()
                                << " Weights QuantInfo: " << weights->info()->quantization_info()
                                << " Input shape: " << input->info()->tensor_shape()
diff --git a/arm_compute/graph/backends/ValidateHelpers.h b/arm_compute/graph/backends/ValidateHelpers.h
index ec84399..3064db2 100644
--- a/arm_compute/graph/backends/ValidateHelpers.h
+++ b/arm_compute/graph/backends/ValidateHelpers.h
@@ -107,37 +107,30 @@
     const PadStrideInfo     conv_info      = node.convolution_info();
     const ConvolutionMethod conv_algorithm = node.convolution_method();
     const bool              fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
+    const unsigned int      num_groups     = node.num_groups();
 
     // Validate function
     Status status{};
     switch(conv_algorithm)
     {
         case ConvolutionMethod::Direct:
+            ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
             status = DirectConvolutionLayer::validate(input, weights, biases, output, conv_info);
             break;
         case ConvolutionMethod::GEMM:
-            status = GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info);
+            status = GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info,
+                                                    WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), num_groups);
             break;
         case ConvolutionMethod::Winograd:
+            ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
             status = WinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, ActivationLayerInfo(), fast_math);
             break;
         case ConvolutionMethod::Default:
-            status = ConvolutionLayer::validate(input, weights, biases, output, conv_info);
+            status = ConvolutionLayer::validate(input, weights, biases, output, conv_info,
+                                                WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), fast_math, num_groups);
             break;
         default:
-            break;
-    }
-
-    // If validation fails try the Default approach
-    if(!bool(status))
-    {
-        status = ConvolutionLayer::validate(input, weights, biases, output, conv_info /*, fast_math*/);
-        if(bool(status))
-        {
-            ARM_COMPUTE_LOG_GRAPH_INFO("Switched ConvolutionLayer method of node with ID : "
-                                       << node.id() << " and Name: " << node.name() << std::endl);
-            node.set_convolution_method(ConvolutionMethod::Default);
-        }
+            ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported convolution method");
     }
 
     return status;
@@ -160,20 +153,30 @@
     ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
 
     // Extract IO and info
-    arm_compute::ITensorInfo        *weights       = detail::get_backing_tensor_info(node.input(1));
-    const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
-    ARM_COMPUTE_ERROR_ON(weights == nullptr);
+    arm_compute::ITensorInfo *input   = detail::get_backing_tensor_info(node.input(0));
+    arm_compute::ITensorInfo *weights = detail::get_backing_tensor_info(node.input(1));
+    arm_compute::ITensorInfo *biases  = get_backing_tensor_info(node.input(2));
+    arm_compute::ITensorInfo *output  = get_backing_tensor_info(node.output(0));
 
-    // TODO (geopin01) : Switch when validation is implemented
+    const PadStrideInfo              conv_info     = node.convolution_info();
+    const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
+
     // Validate function
-    if((dwc_algorithm == DepthwiseConvolutionMethod::Optimized3x3) && (weights->tensor_shape()[get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH)] != 3))
+    Status status{};
+    switch(dwc_algorithm)
     {
-        ARM_COMPUTE_LOG_GRAPH_INFO("Switched DepthwiseConvolutionLayer method of node with ID : "
-                                   << node.id() << " and Name: " << node.name() << std::endl);
-        node.set_depthwise_convolution_method(DepthwiseConvolutionMethod::Default);
+        case DepthwiseConvolutionMethod::Default:
+        case DepthwiseConvolutionMethod::GEMV:
+            status = DepthwiseConvolutionLayer::validate(input, weights, biases, output, conv_info);
+            break;
+        case DepthwiseConvolutionMethod::Optimized3x3:
+            status = DepthwiseConvolutionLayer3x3::validate(input, weights, biases, output, conv_info);
+            break;
+        default:
+            ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported depthwise convolution method");
     }
 
-    return Status{};
+    return status;
 }
 
 /** Validates a permute layer node
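Note that the silent method-switching that used to live in these helpers is gone; they now simply surface the Status to the caller, and the fall-back is handled by the NodeExecutionMethodMutator introduced further down. A small sketch of reacting to a failed validation, using only calls visible above (error_description() assumed from Status):

    const arm_compute::Status status = DepthwiseConvolutionLayer3x3::validate(input, weights, biases, output, conv_info);
    if(!bool(status))
    {
        ARM_COMPUTE_LOG_GRAPH_INFO("Validation failed: " << status.error_description() << std::endl);
    }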
diff --git a/arm_compute/graph/detail/ExecutionHelpers.h b/arm_compute/graph/detail/ExecutionHelpers.h
index 3a35777..fd8d082 100644
--- a/arm_compute/graph/detail/ExecutionHelpers.h
+++ b/arm_compute/graph/detail/ExecutionHelpers.h
@@ -71,12 +71,13 @@
 void allocate_all_tensors(Graph &g);
 /** Configures all nodes of graph
  *
- * @param[in] g   Graph to configure the nodes
- * @param[in] ctx Graph context to use
+ * @param[in, out] g          Graph to configure the nodes
+ * @param[in]      ctx        Graph context to use
+ * @param[in]      node_order The order to configure the nodes
  *
  * @return The execution workload
  */
-ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx);
+ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx, const std::vector<NodeID> &node_order);
 /** Release the memory of all unused const nodes
  *
  * @param[in] g Graph to release the memory from
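Configuration is now driven by an explicit node order rather than an implicit traversal, so the same sorted order can be shared between configuration and execution. A usage sketch (the surrounding GraphManager wiring is assumed):

    arm_compute::graph::ExecutionWorkload build_workload(arm_compute::graph::Graph &g, arm_compute::graph::GraphContext &ctx)
    {
        // Sort the graph once, then configure its nodes in that order
        const std::vector<arm_compute::graph::NodeID> node_order = arm_compute::graph::dfs(g);
        return arm_compute::graph::detail::configure_all_nodes(g, ctx, node_order);
    }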
diff --git a/arm_compute/graph/mutators/GraphMutators.h b/arm_compute/graph/mutators/GraphMutators.h
index a91bc91..67c4d7a 100644
--- a/arm_compute/graph/mutators/GraphMutators.h
+++ b/arm_compute/graph/mutators/GraphMutators.h
@@ -25,7 +25,9 @@
 #define __ARM_COMPUTE_GRAPH_GRAPH_MUTATORS_H__
 
 #include "arm_compute/graph/mutators/DepthConcatSubTensorMutator.h"
+#include "arm_compute/graph/mutators/GroupedConvolutionMutator.h"
 #include "arm_compute/graph/mutators/InPlaceOperationMutator.h"
+#include "arm_compute/graph/mutators/NodeExecutionMethodMutator.h"
 #include "arm_compute/graph/mutators/NodeFusionMutator.h"
 #include "arm_compute/graph/mutators/SplitLayerSubTensorMutator.h"
 
diff --git a/arm_compute/graph/mutators/GroupedConvolutionMutator.h b/arm_compute/graph/mutators/GroupedConvolutionMutator.h
new file mode 100644
index 0000000..8ce2554
--- /dev/null
+++ b/arm_compute/graph/mutators/GroupedConvolutionMutator.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_GROUPED_CONVOLUTION_MUTATOR_H__
+#define __ARM_COMPUTE_GRAPH_GROUPED_CONVOLUTION_MUTATOR_H__
+
+#include "arm_compute/graph/IGraphMutator.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** Mutation pass to implement/optimize grouped convolutions
+ *
+ * @warning This pass is compulsory when the graph contains grouped convolutions
+ **/
+class GroupedConvolutionMutator final : public IGraphMutator
+{
+public:
+    // Inherited methods overridden
+    virtual void mutate(Graph &g) override;
+    const char *name() override;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_GROUPED_CONVOLUTION_MUTATOR_H__ */
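Conceptually, the mutator lowers a convolution with G groups into G convolutions that each see 1/G of the input and weight channels, then concatenates the per-group outputs along the channel axis. A sketch with hypothetical helpers (add_input_slice, add_unit_convolution and replace_with_concatenation stand in for the real builder utilities):

    void lower_grouped_convolution(Graph &g, ConvolutionLayerNode &node)
    {
        const unsigned int num_groups = node.num_groups();
        if(num_groups == 1)
        {
            return; // Nothing to lower
        }

        const NodeParams    params    = node.common_node_params(); // keep name/target on the new nodes
        const PadStrideInfo conv_info = node.convolution_info();

        std::vector<NodeIdxPair> group_outputs;
        for(unsigned int group = 0; group < num_groups; ++group)
        {
            const NodeIdxPair input_slice   = add_input_slice(g, params, node, 0 /* input */, group, num_groups);
            const NodeIdxPair weights_slice = add_input_slice(g, params, node, 1 /* weights */, group, num_groups);
            group_outputs.push_back(add_unit_convolution(g, params, input_slice, weights_slice, conv_info));
        }

        // Concatenate the per-group outputs along the channel dimension and rewire the
        // original node's consumers (cf. get_driving_nodes above) to the concatenated result
        replace_with_concatenation(g, node, group_outputs);
    }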
diff --git a/arm_compute/graph/mutators/NodeExecutionMethodMutator.h b/arm_compute/graph/mutators/NodeExecutionMethodMutator.h
new file mode 100644
index 0000000..f961f5d
--- /dev/null
+++ b/arm_compute/graph/mutators/NodeExecutionMethodMutator.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_NODE_EXECUTION_METHOD_MUTATOR_H__
+#define __ARM_COMPUTE_GRAPH_NODE_EXECUTION_METHOD_MUTATOR_H__
+
+#include "arm_compute/graph/IGraphMutator.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** Mutation pass to fall back to the default execution method
+ *
+ * @note This operates on nodes that support multiple execution methods (e.g. ConvolutionLayerNode)
+ *       whenever the requested execution method is not supported for a given configuration,
+ *       acting as a fall-back mechanism that ensures the graph can still execute.
+ */
+class NodeExecutionMethodMutator final : public IGraphMutator
+{
+public:
+    // Inherited methods overridden
+    virtual void mutate(Graph &g) override;
+    const char *name() override;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_NODE_EXECUTION_METHOD_MUTATOR_H__ */
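A sketch of the fall-back idea (validate_for_backend() is a placeholder for however the real mutator queries the backend's node validation): if the requested convolution method does not validate for a node's configuration, switch that node to the Default method so the graph can still execute:

    void fallback_unsupported_methods(Graph &g)
    {
        for(auto &node : g.nodes())
        {
            if(node != nullptr && node->type() == NodeType::ConvolutionLayer)
            {
                auto *conv_node = static_cast<ConvolutionLayerNode *>(node.get());
                const Status status = validate_for_backend(*conv_node);
                if(!bool(status))
                {
                    conv_node->set_convolution_method(ConvolutionMethod::Default);
                }
            }
        }
    }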
diff --git a/arm_compute/graph/nodes/ConvolutionLayerNode.h b/arm_compute/graph/nodes/ConvolutionLayerNode.h
index 4299be6..0698ac1 100644
--- a/arm_compute/graph/nodes/ConvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/ConvolutionLayerNode.h
@@ -37,11 +37,13 @@
     /** Constructor
      *
      * @param[in] info           Convolution layer attributes
+     * @param[in] num_groups     (Optional) Number of groups (Defaults to 1)
      * @param[in] method         (Optional) Convolution method to use
      * @param[in] fast_math_hint (Optional) Fast math hint
      * @param[in] out_quant_info (Optional) Output quantization info
      */
     ConvolutionLayerNode(PadStrideInfo     info,
+                         unsigned int      num_groups     = 1,
                          ConvolutionMethod method         = ConvolutionMethod::Default,
                          FastMathHint      fast_math_hint = FastMathHint::Disabled,
                          QuantizationInfo  out_quant_info = QuantizationInfo());
@@ -73,6 +75,11 @@
      * @return Convolution information
      */
     PadStrideInfo convolution_info() const;
+    /** Number of groups in convolution accessor
+     *
+     * @return Number of groups in convolution
+     */
+    unsigned int num_groups() const;
     /** Computes convolution output descriptor
      *
      * @param[in] input_descriptor   Input descriptor
@@ -93,6 +100,7 @@
 
 private:
     PadStrideInfo     _info;
+    unsigned int      _num_groups;
     ConvolutionMethod _method;
     FastMathHint      _fast_math_hint;
     QuantizationInfo  _out_quant_info;
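With the extra constructor argument a grouped convolution node can be created directly on a graph; normally this goes through GraphBuilder/the frontend, so the snippet below only illustrates the new parameter (g is an existing Graph):

    // Sketch: add a 2-group convolution node; Winograd/Direct would trip the backend
    // assertions above, so GEMM (or Default) is the method to request.
    const arm_compute::PadStrideInfo conv_info(1, 1, 2, 2);
    const arm_compute::graph::NodeID conv_nid =
        g.add_node<arm_compute::graph::ConvolutionLayerNode>(conv_info,
                                                             2 /* num_groups */,
                                                             arm_compute::graph::ConvolutionMethod::GEMM);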