COMPMID-601: Add GraphContext

GraphContext holds all the information about the hints that need to be
passed to the nodes. As these might expand, it serves as a centralized
class for such information.

Change-Id: I0b5527630fb97cc5fa500db0bac8307ff2ea36e6
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/90300
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/src/graph/Graph.cpp b/src/graph/Graph.cpp
index 525506f..25c4577 100644
--- a/src/graph/Graph.cpp
+++ b/src/graph/Graph.cpp
@@ -46,7 +46,7 @@
      *
      * @param _next_hint Device execution hint
      */
-    void configure(Hint _next_hint);
+    void configure(GraphHints _next_hints);
 
     /** Sets whether to enable information print out
      *
@@ -54,11 +54,12 @@
      */
     void set_info_enablement(bool is_enabled);
 
+    GraphContext                         _ctx{};
     std::vector<Stage>                   _pipeline{};
     std::vector<std::unique_ptr<Tensor>> _tensors{};
     std::vector<std::unique_ptr<INode>>  _nodes{};
-    Hint                                 _current_hint{ Hint::DONT_CARE };
-    Hint                                 _next_hint{ Hint::DONT_CARE };
+    GraphHints                           _current_hints{};
+    GraphHints                           _next_hints{};
     std::unique_ptr<Tensor>              _graph_input{ nullptr };
     std::unique_ptr<Tensor>              _graph_output{ nullptr };
     std::unique_ptr<INode>               _current_node{ nullptr };
@@ -66,8 +67,8 @@
     bool                                 _info_enabled{ false };
 
 private:
-    Tensor *_current_input{ nullptr };
-    Hint    _previous_hint{ Hint::DONT_CARE };
+    Tensor    *_current_input{ nullptr };
+    GraphHints _previous_hints{};
 };
 
 Graph::~Graph() //NOLINT
@@ -102,7 +103,7 @@
 }
 
 //Finalize current node's configuration
-void Graph::Private::configure(Hint _next_hint)
+void Graph::Private::configure(GraphHints _next_hints)
 {
     ARM_COMPUTE_ERROR_ON(_current_node == nullptr);
     ARM_COMPUTE_ERROR_ON(_graph_input == nullptr);
@@ -110,9 +111,9 @@
     // Is it the first node of the graph ?
     if(_current_input == nullptr)
     {
-        _graph_input->set_target(_current_hint);
-        _current_input = _graph_input.get();
-        _previous_hint = _current_hint; // For the first node just assume the previous node was of the same type as this one
+        _graph_input->set_target(_current_hints.target_hint());
+        _current_input  = _graph_input.get();
+        _previous_hints = _current_hints; // For the first node just assume the previous node was of the same type as this one
     }
 
     //Automatic output configuration ?
@@ -123,29 +124,31 @@
     }
 
     // If either the writer or reader node needs OpenCL then use OpenCL memory:
-    if((_next_hint == Hint::OPENCL || _current_hint == Hint::OPENCL))
+    if((_next_hints.target_hint() == TargetHint::OPENCL || _current_hints.target_hint() == TargetHint::OPENCL))
     {
-        _current_output->set_target(Hint::OPENCL);
+        _current_output->set_target(TargetHint::OPENCL);
     }
     else
     {
-        _current_output->set_target(Hint::NEON);
+        _current_output->set_target(TargetHint::NEON);
     }
 
-    // Map input if needed
-    std::unique_ptr<arm_compute::IFunction> func = _current_node->instantiate_node(_current_hint, _current_input->tensor(), _current_output->tensor());
+    // Update ctx and instantiate node
+    _ctx.hints()                                 = _current_hints;
+    std::unique_ptr<arm_compute::IFunction> func = _current_node->instantiate_node(_ctx, _current_input->tensor(), _current_output->tensor());
     _current_input->allocate();
 
-    if(_current_input->target() == Hint::OPENCL)
+    // Map input if needed
+    if(_current_input->target() == TargetHint::OPENCL)
     {
-        if(_previous_hint == Hint::NEON)
+        if(_previous_hints.target_hint() == TargetHint::NEON)
         {
-            ARM_COMPUTE_ERROR_ON(_current_hint == Hint::NEON);
+            ARM_COMPUTE_ERROR_ON(_current_hints.target_hint() == TargetHint::NEON);
             _pipeline.push_back({ _current_input, _current_input, arm_compute::support::cpp14::make_unique<CLUnmap>(_current_input) });
         }
-        if(_current_hint == Hint::NEON)
+        if(_current_hints.target_hint() == TargetHint::NEON)
         {
-            ARM_COMPUTE_ERROR_ON(_previous_hint == Hint::NEON);
+            ARM_COMPUTE_ERROR_ON(_previous_hints.target_hint() == TargetHint::NEON);
             _pipeline.push_back({ _current_input, _current_input, arm_compute::support::cpp14::make_unique<CLMap>(_current_input, true) });
         }
     }
@@ -154,8 +157,8 @@
 
     _current_input  = _current_output;
     _current_output = nullptr;
-    _previous_hint  = _current_hint;
-    _current_hint   = _next_hint;
+    std::swap(_previous_hints, _current_hints);
+    std::swap(_current_hints, _next_hints);
 }
 
 void Graph::Private::set_info_enablement(bool is_enabled)
@@ -169,12 +172,13 @@
     ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_graph_output != nullptr, "Nothing can be added after the output tensor");
     //Trigger the creation of the current Node:
 
-    Hint _next_hint = node->override_hint(_pimpl->_next_hint);
-    ARM_COMPUTE_ERROR_ON(_next_hint == Hint::DONT_CARE);
+    GraphHints _next_hints = _pimpl->_next_hints;
+    _next_hints.set_target_hint(node->override_target_hint(_pimpl->_next_hints.target_hint()));
+    ARM_COMPUTE_ERROR_ON(_next_hints.target_hint() == TargetHint::DONT_CARE);
     if(_pimpl->_current_node)
     {
         //Finalize the previous Node:
-        _pimpl->configure(_pimpl->_next_hint);
+        _pimpl->configure(_pimpl->_next_hints);
 
         if(_pimpl->_info_enabled)
         {
@@ -183,8 +187,8 @@
     }
     else
     {
-        // If that's the first node then use the same Hint before and after the node.
-        _pimpl->_current_hint = _next_hint;
+        // If that's the first node then use the same TargetHint before and after the node.
+        _pimpl->_current_hints = _next_hints;
     }
     if(_pimpl->_current_node)
     {
@@ -192,15 +196,6 @@
     }
     _pimpl->_current_node = std::move(node);
 }
-void Graph::set_hint(Hint hint)
-{
-    _pimpl->_next_hint = hint;
-}
-
-void Graph::set_info_enablement(bool is_enabled)
-{
-    _pimpl->set_info_enablement(is_enabled);
-}
 
 //Add a tensor with an Accessor (i.e either the input or output of the graph)
 void Graph::add_tensor(std::unique_ptr<Tensor> tensor)
@@ -221,7 +216,7 @@
         _pimpl->_current_output = _pimpl->_graph_output.get();
 
         // Finalize the graph by configuring the last Node of the graph:
-        _pimpl->configure(_pimpl->_current_hint); // Ignore _next_hint as this is the last node, and just use the same hint as before this node.
+        _pimpl->configure(_pimpl->_current_hints); // Ignore _next_hints as this is the last node, and just use the same hints as before this node.
         _pimpl->_graph_output->allocate();
     }
 }
@@ -236,6 +231,16 @@
     _pimpl->_current_output = _pimpl->_tensors.back().get();
 }
 
+void Graph::set_info_enablement(bool is_enabled)
+{
+    _pimpl->set_info_enablement(is_enabled);
+}
+
+GraphHints &Graph::hints()
+{
+    return _pimpl->_next_hints;
+}
+
 Graph &arm_compute::graph::operator<<(Graph &graph, TensorInfo &&info)
 {
     graph.set_temp(std::move(info));
@@ -248,8 +253,14 @@
     return graph;
 }
 
-Graph &arm_compute::graph::operator<<(Graph &graph, Hint hint)
+Graph &arm_compute::graph::operator<<(Graph &graph, TargetHint target_hint)
 {
-    graph.set_hint(hint);
+    graph.hints().set_target_hint(target_hint);
+    return graph;
+}
+
+Graph &arm_compute::graph::operator<<(Graph &graph, ConvolutionMethodHint conv_method_hint)
+{
+    graph.hints().set_convolution_method_hint(conv_method_hint);
     return graph;
 }
diff --git a/src/graph/GraphContext.cpp b/src/graph/GraphContext.cpp
new file mode 100644
index 0000000..bfc6fcd
--- /dev/null
+++ b/src/graph/GraphContext.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/GraphContext.h"
+
+using namespace arm_compute::graph;
+
+GraphHints::GraphHints(TargetHint target_hint, ConvolutionMethodHint conv_method_hint)
+    : _target_hint(target_hint), _convolution_method_hint(conv_method_hint)
+{
+}
+
+void GraphHints::set_target_hint(TargetHint target_hint)
+{
+    _target_hint = target_hint;
+}
+
+void GraphHints::set_convolution_method_hint(ConvolutionMethodHint convolution_method)
+{
+    _convolution_method_hint = convolution_method;
+}
+
+TargetHint GraphHints::target_hint() const
+{
+    return _target_hint;
+}
+
+ConvolutionMethodHint GraphHints::convolution_method_hint() const
+{
+    return _convolution_method_hint;
+}
+
+GraphContext::GraphContext()
+    : _hints()
+{
+}
+
+GraphHints &GraphContext::hints()
+{
+    return _hints;
+}
+
+const GraphHints &GraphContext::hints() const
+{
+    return _hints;
+}
\ No newline at end of file
diff --git a/src/graph/INode.cpp b/src/graph/INode.cpp
index 6b25022..4b383f5 100644
--- a/src/graph/INode.cpp
+++ b/src/graph/INode.cpp
@@ -21,7 +21,6 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-
 #include "arm_compute/graph/INode.h"
 
 #include "arm_compute/core/CL/OpenCL.h"
@@ -31,17 +30,20 @@
 
 using namespace arm_compute::graph;
 
-Hint INode::override_hint(Hint hint) const
+TargetHint INode::override_target_hint(TargetHint target_hint) const
 {
-    if(hint == Hint::OPENCL && !opencl_is_available())
+    if(target_hint == TargetHint::OPENCL && !opencl_is_available())
     {
-        hint = Hint::DONT_CARE;
+        target_hint = TargetHint::DONT_CARE;
     }
-    hint = node_override_hint(hint);
-    ARM_COMPUTE_ERROR_ON(hint == Hint::OPENCL && !opencl_is_available());
-    return hint;
+    GraphHints hints{ target_hint };
+    target_hint = node_override_hints(hints).target_hint();
+    ARM_COMPUTE_ERROR_ON(target_hint == TargetHint::OPENCL && !opencl_is_available());
+    return target_hint;
 }
-Hint INode::node_override_hint(Hint hint) const
+GraphHints INode::node_override_hints(GraphHints hints) const
 {
-    return hint == Hint::DONT_CARE ? Hint::NEON : hint;
+    TargetHint target_hint = hints.target_hint();
+    hints.set_target_hint((target_hint == TargetHint::DONT_CARE) ? TargetHint::NEON : target_hint);
+    return hints;
 }
diff --git a/src/graph/SubTensor.cpp b/src/graph/SubTensor.cpp
index a70f329..abf8506 100644
--- a/src/graph/SubTensor.cpp
+++ b/src/graph/SubTensor.cpp
@@ -21,7 +21,6 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-
 #include "arm_compute/graph/SubTensor.h"
 
 #include "arm_compute/core/Error.h"
@@ -45,12 +44,12 @@
 } // namespace
 
 SubTensor::SubTensor()
-    : _target(Hint::DONT_CARE), _coords(), _info(), _parent(nullptr), _subtensor(nullptr)
+    : _target(TargetHint::DONT_CARE), _coords(), _info(), _parent(nullptr), _subtensor(nullptr)
 {
 }
 
 SubTensor::SubTensor(Tensor &parent, TensorShape tensor_shape, Coordinates coords)
-    : _target(Hint::DONT_CARE), _coords(coords), _info(), _parent(nullptr), _subtensor(nullptr)
+    : _target(TargetHint::DONT_CARE), _coords(coords), _info(), _parent(nullptr), _subtensor(nullptr)
 {
     ARM_COMPUTE_ERROR_ON(parent.tensor() == nullptr);
     _parent = parent.tensor();
@@ -60,7 +59,7 @@
     instantiate_subtensor();
 }
 
-SubTensor::SubTensor(ITensor *parent, TensorShape tensor_shape, Coordinates coords, Hint target)
+SubTensor::SubTensor(ITensor *parent, TensorShape tensor_shape, Coordinates coords, TargetHint target)
     : _target(target), _coords(coords), _info(), _parent(parent), _subtensor(nullptr)
 {
     ARM_COMPUTE_ERROR_ON(parent == nullptr);
@@ -84,7 +83,7 @@
     return _subtensor.get();
 }
 
-Hint SubTensor::target() const
+TargetHint SubTensor::target() const
 {
     return _target;
 }
@@ -93,13 +92,13 @@
 {
     switch(_target)
     {
-        case Hint::OPENCL:
+        case TargetHint::OPENCL:
             _subtensor = initialise_subtensor<arm_compute::CLSubTensor, arm_compute::ICLTensor>(_parent, _info.tensor_shape(), _coords);
             break;
-        case Hint::NEON:
+        case TargetHint::NEON:
             _subtensor = initialise_subtensor<arm_compute::SubTensor, arm_compute::ITensor>(_parent, _info.tensor_shape(), _coords);
             break;
         default:
-            ARM_COMPUTE_ERROR("Invalid Hint");
+            ARM_COMPUTE_ERROR("Invalid TargetHint");
     }
 }
diff --git a/src/graph/Tensor.cpp b/src/graph/Tensor.cpp
index c534ae0..31dd4e8 100644
--- a/src/graph/Tensor.cpp
+++ b/src/graph/Tensor.cpp
@@ -21,7 +21,6 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-
 #include "arm_compute/graph/Tensor.h"
 
 #include "arm_compute/core/Error.h"
@@ -53,7 +52,7 @@
 } // namespace
 
 Tensor::Tensor(TensorInfo &&info)
-    : _target(Hint::DONT_CARE), _info(info), _accessor(nullptr), _tensor(nullptr)
+    : _target(TargetHint::DONT_CARE), _info(info), _accessor(nullptr), _tensor(nullptr)
 {
 }
 
@@ -96,7 +95,7 @@
     return _info;
 }
 
-ITensor *Tensor::set_target(Hint target)
+ITensor *Tensor::set_target(TargetHint target)
 {
     if(_tensor != nullptr)
     {
@@ -106,14 +105,14 @@
     {
         switch(target)
         {
-            case Hint::OPENCL:
+            case TargetHint::OPENCL:
                 _tensor = initialise_tensor<arm_compute::CLTensor>(_info);
                 break;
-            case Hint::NEON:
+            case TargetHint::NEON:
                 _tensor = initialise_tensor<arm_compute::Tensor>(_info);
                 break;
             default:
-                ARM_COMPUTE_ERROR("Invalid Hint");
+                ARM_COMPUTE_ERROR("Invalid TargetHint");
         }
         _target = target;
     }
@@ -125,14 +124,14 @@
     ARM_COMPUTE_ERROR_ON_NULLPTR(_tensor.get());
     switch(_target)
     {
-        case Hint::OPENCL:
+        case TargetHint::OPENCL:
             tensor_allocate<arm_compute::CLTensor>(*_tensor);
             break;
-        case Hint::NEON:
+        case TargetHint::NEON:
             tensor_allocate<arm_compute::Tensor>(*_tensor);
             break;
         default:
-            ARM_COMPUTE_ERROR("Invalid Hint");
+            ARM_COMPUTE_ERROR("Invalid TargetHint");
     }
 }
 
@@ -145,7 +144,7 @@
     }
 }
 
-Hint Tensor::target() const
+TargetHint Tensor::target() const
 {
     return _target;
 }
diff --git a/src/graph/nodes/ActivationLayer.cpp b/src/graph/nodes/ActivationLayer.cpp
index b71e22c..da2dac0 100644
--- a/src/graph/nodes/ActivationLayer.cpp
+++ b/src/graph/nodes/ActivationLayer.cpp
@@ -34,7 +34,7 @@
 
 namespace
 {
-template <typename ActivationType, typename TensorType, Hint hint>
+template <typename ActivationType, typename TensorType, TargetHint target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
 {
     auto activation = arm_compute::support::cpp14::make_unique<ActivationType>();
@@ -46,19 +46,19 @@
     return std::move(activation);
 }
 
-template <Hint                          hint>
+template <TargetHint                    target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info);
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
 {
-    return instantiate_function<arm_compute::CLActivationLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output, activation_info);
+    return instantiate_function<arm_compute::CLActivationLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output, activation_info);
 }
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output, const ActivationLayerInfo &activation_info)
 {
-    return instantiate_function<arm_compute::NEActivationLayer, arm_compute::Tensor, Hint::NEON>(input, output, activation_info);
+    return instantiate_function<arm_compute::NEActivationLayer, arm_compute::Tensor, TargetHint::NEON>(input, output, activation_info);
 }
 } // namespace
 
@@ -67,27 +67,27 @@
 {
 }
 
-std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
 {
     std::unique_ptr<arm_compute::IFunction> func;
-    _hint   = hint;
-    _input  = input;
-    _output = output;
+    _target_hint = ctx.hints().target_hint();
+    _input       = input;
+    _output      = output;
 
-    if(_hint == Hint::OPENCL)
+    if(_target_hint == TargetHint::OPENCL)
     {
-        func = instantiate<Hint::OPENCL>(input, output, _activation_info);
+        func = instantiate<TargetHint::OPENCL>(input, output, _activation_info);
     }
     else
     {
-        func = instantiate<Hint::NEON>(input, output, _activation_info);
+        func = instantiate<TargetHint::NEON>(input, output, _activation_info);
     }
     return func;
 }
 
 void ActivationLayer::print_info()
 {
-    if(_hint == Hint::OPENCL)
+    if(_target_hint == TargetHint::OPENCL)
     {
         std::cout << "Instantiating CLActivationLayer";
     }
diff --git a/src/graph/nodes/ConvolutionLayer.cpp b/src/graph/nodes/ConvolutionLayer.cpp
index ce9f096..a992095 100644
--- a/src/graph/nodes/ConvolutionLayer.cpp
+++ b/src/graph/nodes/ConvolutionLayer.cpp
@@ -65,7 +65,7 @@
 }
 
 // Instantiate GEMM based convolution layer
-template <typename ConvolutionType, typename TensorType, Hint hint>
+template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
 {
     auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
@@ -79,7 +79,7 @@
 }
 
 // Instantiate direct convolution layer
-template <typename ConvolutionType, typename TensorType, Hint hint>
+template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate_direct_function(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
 {
     auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
@@ -92,35 +92,37 @@
     return std::move(conv);
 }
 
-template <Hint                          hint>
+template <TargetHint                    target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                                     ConvolutionMethodHint conv_method);
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
-                                                                  ConvolutionMethodHint conv_method)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
+                                                                        const WeightsInfo    &weights_info,
+                                                                        ConvolutionMethodHint conv_method)
 {
     if(conv_method == ConvolutionMethodHint::GEMM)
     {
-        return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
+        return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
     }
     else
     {
-        return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, Hint::OPENCL>(input, weights, biases, output, conv_info);
+        return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info);
     }
 }
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
-                                                                ConvolutionMethodHint conv_method)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
+                                                                      const WeightsInfo    &weights_info,
+                                                                      ConvolutionMethodHint conv_method)
 {
     if(conv_method == ConvolutionMethodHint::GEMM)
     {
-        return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, Hint::NEON>(input, weights, biases, output, conv_info, weights_info);
+        return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info, weights_info);
     }
     else
     {
-        return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, Hint::NEON>(input, weights, biases, output, conv_info);
+        return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info);
     }
 }
 } // namespace
@@ -166,7 +168,7 @@
     std::vector<std::unique_ptr<IFunction>> _convolutions;
 };
 
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
 {
     // Set weights and biases info
     if(_weights.tensor() == nullptr)
@@ -181,17 +183,18 @@
     }
 
     std::unique_ptr<arm_compute::IFunction> func;
-    _hint   = hint;
-    _input  = input;
-    _output = output;
+    _target_hint                                 = ctx.hints().target_hint();
+    _input                                       = input;
+    _output                                      = output;
+    const ConvolutionMethodHint conv_method_hint = ctx.hints().convolution_method_hint();
 
     // Check if the weights and biases are loaded
     bool weights_are_loaded = _weights.tensor() != nullptr;
     bool biases_are_loaded  = _weights.tensor() != nullptr;
 
     // Set bias and weights target
-    _weights.set_target(_hint);
-    _biases.set_target(_hint);
+    _weights.set_target(_target_hint);
+    _biases.set_target(_target_hint);
 
     // Calculate output shape
     TensorShape output_shape = calculate_convolution_layer_output_shape(_input->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);
@@ -200,14 +203,13 @@
     arm_compute::auto_init_if_empty(*_output->info(), output_shape, 1, _input->info()->data_type(), _input->info()->fixed_point_position());
 
     // Create appropriate convolution function
-    // TODO(geopin01): Fix convolution layer hints once the GraphContext has been added
     if(_num_groups == 1)
     {
-        func = instantiate_convolution(ConvolutionMethodHint::GEMM);
+        func = instantiate_convolution(conv_method_hint);
     }
     else
     {
-        func = instantiate_grouped_convolution(ConvolutionMethodHint::GEMM);
+        func = instantiate_grouped_convolution(conv_method_hint);
     }
 
     // Fill weights
@@ -226,7 +228,7 @@
 
 void ConvolutionLayer::print_info()
 {
-    if(_hint == Hint::OPENCL)
+    if(_target_hint == TargetHint::OPENCL)
     {
         std::cout << "Instantiating CLConvolutionLayer";
     }
@@ -248,13 +250,13 @@
 std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ConvolutionMethodHint conv_method_hint)
 {
     std::unique_ptr<arm_compute::IFunction> func;
-    if(_hint == Hint::OPENCL)
+    if(_target_hint == TargetHint::OPENCL)
     {
-        func = instantiate<Hint::OPENCL>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+        func = instantiate<TargetHint::OPENCL>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
     }
     else
     {
-        func = instantiate<Hint::NEON>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
+        func = instantiate<TargetHint::NEON>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
     }
     return func;
 }
@@ -306,20 +308,20 @@
         Coordinates biases_coord(biases_split * i);
 
         // Create sub-tensors for input, output, weights and bias
-        auto hint_to_use = (_hint == Hint::OPENCL) ? Hint::OPENCL : Hint::NEON;
+        auto hint_to_use = (_target_hint == TargetHint::OPENCL) ? TargetHint::OPENCL : TargetHint::NEON;
         _is[i]           = SubTensor(_input, input_shape, input_coord, hint_to_use);
         _os[i]           = SubTensor(_output, output_shape, output_coord, hint_to_use);
         _ws[i]           = SubTensor(_weights.tensor(), weights_shape, weights_coord, hint_to_use);
         _bs[i]           = SubTensor(_biases.tensor(), biases_shape, biases_coord, hint_to_use);
 
         // Instantiate convolution function
-        if(_hint == Hint::OPENCL)
+        if(_target_hint == TargetHint::OPENCL)
         {
-            func = instantiate<Hint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
+            func = instantiate<TargetHint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
         }
         else
         {
-            func = instantiate<Hint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
+            func = instantiate<TargetHint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
         }
 
         // Add convolution function to the list of convolutions for the grouped convolution
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index fcc86be..c317660 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -43,7 +43,7 @@
     }
     return TensorShape(output_neurons, batches);
 }
-template <typename FullyConnectedType, typename TensorType, Hint hint>
+template <typename FullyConnectedType, typename TensorType, TargetHint target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
 {
     bool weights_are_loaded = weights.tensor() != nullptr;
@@ -52,8 +52,8 @@
     auto conv = arm_compute::support::cpp14::make_unique<FullyConnectedType>();
     conv->configure(
         dynamic_cast<TensorType *>(input),
-        dynamic_cast<TensorType *>(weights.set_target(hint)),
-        dynamic_cast<TensorType *>(biases.set_target(hint)),
+        dynamic_cast<TensorType *>(weights.set_target(target_hint)),
+        dynamic_cast<TensorType *>(biases.set_target(target_hint)),
         dynamic_cast<TensorType *>(output));
     if(!weights_are_loaded)
     {
@@ -67,23 +67,23 @@
     return std::move(conv);
 }
 
-template <Hint                          hint>
+template <TargetHint                    target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output);
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
 {
-    return instantiate_function<arm_compute::CLFullyConnectedLayer, arm_compute::CLTensor, Hint::OPENCL>(input, weights, biases, output);
+    return instantiate_function<arm_compute::CLFullyConnectedLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, weights, biases, output);
 }
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, Tensor &weights, Tensor &biases, ITensor *output)
 {
-    return instantiate_function<arm_compute::NEFullyConnectedLayer, arm_compute::Tensor, Hint::NEON>(input, weights, biases, output);
+    return instantiate_function<arm_compute::NEFullyConnectedLayer, arm_compute::Tensor, TargetHint::NEON>(input, weights, biases, output);
 }
 } // namespace
 
-std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
 {
     if(_weights.tensor() == nullptr)
     {
@@ -111,17 +111,17 @@
                                     input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position());
 
     std::unique_ptr<arm_compute::IFunction> func;
-    _hint   = hint;
-    _input  = input;
-    _output = output;
+    _target_hint = ctx.hints().target_hint();
+    _input       = input;
+    _output      = output;
 
-    if(_hint == Hint::OPENCL)
+    if(_target_hint == TargetHint::OPENCL)
     {
-        func = instantiate<Hint::OPENCL>(input, _weights, _biases, output);
+        func = instantiate<TargetHint::OPENCL>(input, _weights, _biases, output);
     }
     else
     {
-        func = instantiate<Hint::NEON>(input, _weights, _biases, output);
+        func = instantiate<TargetHint::NEON>(input, _weights, _biases, output);
     }
 
     return func;
@@ -129,7 +129,7 @@
 
 void FullyConnectedLayer::print_info()
 {
-    if(_hint == Hint::OPENCL)
+    if(_target_hint == TargetHint::OPENCL)
     {
         std::cout << "Instantiating CLFullyConnectedLayer";
     }
diff --git a/src/graph/nodes/NormalizationLayer.cpp b/src/graph/nodes/NormalizationLayer.cpp
index 55ef9bf..99d07dc 100644
--- a/src/graph/nodes/NormalizationLayer.cpp
+++ b/src/graph/nodes/NormalizationLayer.cpp
@@ -34,7 +34,7 @@
 
 namespace
 {
-template <typename NormalizationType, typename TensorType, Hint hint>
+template <typename NormalizationType, typename TensorType, TargetHint target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
 {
     auto norm = arm_compute::support::cpp14::make_unique<NormalizationType>();
@@ -46,19 +46,19 @@
     return std::move(norm);
 }
 
-template <Hint                          hint>
+template <TargetHint                    target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info);
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
 {
-    return instantiate_function<arm_compute::CLNormalizationLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output, norm_info);
+    return instantiate_function<arm_compute::CLNormalizationLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output, norm_info);
 }
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output, const NormalizationLayerInfo &norm_info)
 {
-    return instantiate_function<arm_compute::NENormalizationLayer, arm_compute::Tensor, Hint::NEON>(input, output, norm_info);
+    return instantiate_function<arm_compute::NENormalizationLayer, arm_compute::Tensor, TargetHint::NEON>(input, output, norm_info);
 }
 } // namespace
 
@@ -67,20 +67,20 @@
 {
 }
 
-std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
 {
     std::unique_ptr<arm_compute::IFunction> func;
-    _hint   = hint;
-    _input  = input;
-    _output = output;
+    _target_hint = ctx.hints().target_hint();
+    _input       = input;
+    _output      = output;
 
-    if(_hint == Hint::OPENCL)
+    if(_target_hint == TargetHint::OPENCL)
     {
-        func = instantiate<Hint::OPENCL>(input, output, _norm_info);
+        func = instantiate<TargetHint::OPENCL>(input, output, _norm_info);
     }
     else
     {
-        func = instantiate<Hint::NEON>(input, output, _norm_info);
+        func = instantiate<TargetHint::NEON>(input, output, _norm_info);
     }
 
     return func;
@@ -88,7 +88,7 @@
 
 void NormalizationLayer::print_info()
 {
-    if(_hint == Hint::OPENCL)
+    if(_target_hint == TargetHint::OPENCL)
     {
         std::cout << "Instantiating CLNormalizationLayer";
     }
diff --git a/src/graph/nodes/PoolingLayer.cpp b/src/graph/nodes/PoolingLayer.cpp
index f29332f..2a5e4cb 100644
--- a/src/graph/nodes/PoolingLayer.cpp
+++ b/src/graph/nodes/PoolingLayer.cpp
@@ -34,7 +34,7 @@
 
 namespace
 {
-template <typename PoolingType, typename TensorType, Hint hint>
+template <typename PoolingType, typename TensorType, TargetHint target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
 {
     auto pool = arm_compute::support::cpp14::make_unique<PoolingType>();
@@ -46,19 +46,19 @@
     return std::move(pool);
 }
 
-template <Hint                          hint>
+template <TargetHint                    target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info);
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
 {
-    return instantiate_function<arm_compute::CLPoolingLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output, pool_info);
+    return instantiate_function<arm_compute::CLPoolingLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output, pool_info);
 }
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
 {
-    return instantiate_function<arm_compute::NEPoolingLayer, arm_compute::Tensor, Hint::NEON>(input, output, pool_info);
+    return instantiate_function<arm_compute::NEPoolingLayer, arm_compute::Tensor, TargetHint::NEON>(input, output, pool_info);
 }
 } // namespace
 
@@ -67,20 +67,20 @@
 {
 }
 
-std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
 {
     std::unique_ptr<arm_compute::IFunction> func;
-    _hint   = hint;
-    _input  = input;
-    _output = output;
+    _target_hint = ctx.hints().target_hint();
+    _input       = input;
+    _output      = output;
 
-    if(_hint == Hint::OPENCL)
+    if(_target_hint == TargetHint::OPENCL)
     {
-        func = instantiate<Hint::OPENCL>(input, output, _pool_info);
+        func = instantiate<TargetHint::OPENCL>(input, output, _pool_info);
     }
     else
     {
-        func = instantiate<Hint::NEON>(input, output, _pool_info);
+        func = instantiate<TargetHint::NEON>(input, output, _pool_info);
     }
 
     return func;
@@ -88,7 +88,7 @@
 
 void PoolingLayer::print_info()
 {
-    if(_hint == Hint::OPENCL)
+    if(_target_hint == TargetHint::OPENCL)
     {
         std::cout << "Instantiating CLPoolingLayer";
     }
diff --git a/src/graph/nodes/SoftmaxLayer.cpp b/src/graph/nodes/SoftmaxLayer.cpp
index fee8897..9e798ef 100644
--- a/src/graph/nodes/SoftmaxLayer.cpp
+++ b/src/graph/nodes/SoftmaxLayer.cpp
@@ -34,7 +34,7 @@
 
 namespace
 {
-template <typename SoftmaxType, typename TensorType, Hint hint>
+template <typename SoftmaxType, typename TensorType, TargetHint target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output)
 {
     auto softmax = arm_compute::support::cpp14::make_unique<SoftmaxType>();
@@ -45,36 +45,36 @@
     return std::move(softmax);
 }
 
-template <Hint                          hint>
+template <TargetHint                    target_hint>
 std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output);
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::OPENCL>(ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output)
 {
-    return instantiate_function<arm_compute::CLSoftmaxLayer, arm_compute::CLTensor, Hint::OPENCL>(input, output);
+    return instantiate_function<arm_compute::CLSoftmaxLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output);
 }
 
 template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<Hint::NEON>(ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output)
 {
-    return instantiate_function<arm_compute::NESoftmaxLayer, arm_compute::Tensor, Hint::NEON>(input, output);
+    return instantiate_function<arm_compute::NESoftmaxLayer, arm_compute::Tensor, TargetHint::NEON>(input, output);
 }
 } // namespace
 
-std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(Hint hint, ITensor *input, ITensor *output)
+std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
 {
     std::unique_ptr<arm_compute::IFunction> func;
-    _hint   = hint;
-    _input  = input;
-    _output = output;
+    _target_hint = ctx.hints().target_hint();
+    _input       = input;
+    _output      = output;
 
-    if(_hint == Hint::OPENCL)
+    if(_target_hint == TargetHint::OPENCL)
     {
-        func = instantiate<Hint::OPENCL>(input, output);
+        func = instantiate<TargetHint::OPENCL>(input, output);
     }
     else
     {
-        func = instantiate<Hint::NEON>(input, output);
+        func = instantiate<TargetHint::NEON>(input, output);
     }
 
     return func;
@@ -82,7 +82,7 @@
 
 void SoftmaxLayer::print_info()
 {
-    if(_hint == Hint::OPENCL)
+    if(_target_hint == TargetHint::OPENCL)
     {
         std::cout << "Instantiating CLSoftmaxLayer";
     }