COMPMID-797: Switch to new graph.

- Cleaned up build system

Change-Id: If2faa27ee5b31fa8b972836960ab3ef671059c8d
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/126435
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
diff --git a/src/core/CL/OpenCL.cpp b/src/core/CL/OpenCL.cpp
index 0ef800f..a8ed973 100644
--- a/src/core/CL/OpenCL.cpp
+++ b/src/core/CL/OpenCL.cpp
@@ -115,6 +115,8 @@
     LOAD_FUNCTION_PTR(clSVMFree, handle);
     LOAD_FUNCTION_PTR(clEnqueueSVMMap, handle);
     LOAD_FUNCTION_PTR(clEnqueueSVMUnmap, handle);
+    LOAD_FUNCTION_PTR(clEnqueueMarker, handle);
+    LOAD_FUNCTION_PTR(clWaitForEvents, handle);
 
 #undef LOAD_FUNCTION_PTR
 
@@ -133,6 +135,36 @@
 }
 } // namespace arm_compute
 
+cl_int clEnqueueMarker(cl_command_queue command_queue,
+                       cl_event        *event)
+{
+    arm_compute::CLSymbols::get().load_default();
+    auto func = arm_compute::CLSymbols::get().clEnqueueMarker_ptr;
+    if(func != nullptr)
+    {
+        return func(command_queue, event);
+    }
+    else
+    {
+        return CL_OUT_OF_RESOURCES;
+    }
+}
+
+cl_int clWaitForEvents(cl_uint         num_events,
+                       const cl_event *event_list)
+{
+    arm_compute::CLSymbols::get().load_default();
+    auto func = arm_compute::CLSymbols::get().clWaitForEvents_ptr;
+    if(func != nullptr)
+    {
+        return func(num_events, event_list);
+    }
+    else
+    {
+        return CL_OUT_OF_RESOURCES;
+    }
+}
+
 cl_int clEnqueueSVMMap(cl_command_queue command_queue, cl_bool blocking_map, cl_map_flags flags, void *svm_ptr,
                        size_t size, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event)
 {
diff --git a/src/graph/CL/CLMap.cpp b/src/graph/CL/CLMap.cpp
deleted file mode 100644
index 5289ea9..0000000
--- a/src/graph/CL/CLMap.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/CL/CLMap.h"
-
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/graph/ITensorObject.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-
-using namespace arm_compute::graph;
-
-CLMap::CLMap(ITensorObject *tensor, bool blocking)
-    : _tensor(dynamic_cast<arm_compute::ICLTensor *>(tensor->tensor())), _blocking(blocking)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(_tensor);
-}
-
-void CLMap::run()
-{
-    _tensor->map(arm_compute::CLScheduler::get().queue(), _blocking);
-}
diff --git a/src/graph/CL/CLUnmap.cpp b/src/graph/CL/CLUnmap.cpp
deleted file mode 100644
index 31f2f19..0000000
--- a/src/graph/CL/CLUnmap.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/CL/CLUnmap.h"
-
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/graph/ITensorObject.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-
-using namespace arm_compute::graph;
-
-CLUnmap::CLUnmap(ITensorObject *tensor)
-    : _tensor(dynamic_cast<arm_compute::ICLTensor *>(tensor->tensor()))
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(_tensor);
-}
-
-void CLUnmap::run()
-{
-    _tensor->unmap(arm_compute::CLScheduler::get().queue());
-}
diff --git a/src/graph/Graph.cpp b/src/graph/Graph.cpp
index 47bd672..e1ffeed 100644
--- a/src/graph/Graph.cpp
+++ b/src/graph/Graph.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,293 +23,205 @@
  */
 #include "arm_compute/graph/Graph.h"
 
-#include "arm_compute/graph/CL/CLMap.h"
-#include "arm_compute/graph/CL/CLUnmap.h"
-#include "arm_compute/graph/INode.h"
-#include "arm_compute/graph/ITensorObject.h"
-#include "arm_compute/graph/Tensor.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/CLTuner.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "support/ToolchainSupport.h"
-
-#include <sys/stat.h>
-
-using namespace arm_compute::graph;
-
-namespace
+namespace arm_compute
 {
-bool file_exists(const std::string &filename)
+namespace graph
 {
-    std::ifstream file(filename);
-    return file.good();
+Graph::Graph(GraphID id, std::string name)
+    : _id(id), _name(std::move(name)), _nodes(), _edges(), _tensors(), _tagged_nodes(), _mtx()
+{
 }
 
-} // namespace
-struct Stage
+bool Graph::remove_node(NodeID nid)
 {
-    ITensorObject                          *_input;
-    ITensorObject                          *_output;
-    std::unique_ptr<arm_compute::IFunction> _function;
-};
-
-struct Graph::Private
-{
-public:
-    /** Finalizes the current node's configuration
-     *
-     * @param _next_hint Device execution hint
-     */
-    void configure(GraphHints _next_hints);
-
-    GraphContext                                _ctx{};
-    std::vector<Stage>                          _pipeline{};
-    std::vector<std::unique_ptr<ITensorObject>> _tensors{};
-    std::vector<std::unique_ptr<INode>>         _nodes{};
-    GraphHints                                  _current_hints{};
-    GraphHints                                  _next_hints{};
-    std::unique_ptr<ITensorObject>              _graph_input{ nullptr };
-    std::unique_ptr<ITensorObject>              _graph_output{ nullptr };
-    std::unique_ptr<INode>                      _current_node{ nullptr };
-    ITensorObject                              *_current_output{ nullptr };
-    bool                                        _info_enabled{ false };
-    CLTuner                                     _tuner{};
-
-private:
-    ITensorObject *_current_input{ nullptr };
-    GraphHints     _previous_hints{};
-};
-
-static const std::string tuner_data_filename = "acl_tuner.csv";
-Graph::~Graph() //NOLINT
-{
-    if(_pimpl->_tuner.tune_new_kernels() && !_pimpl->_tuner.lws_table().empty())
+    if(nid >= _nodes.size())
     {
-        _pimpl->_tuner.save_to_file(tuner_data_filename);
+        return false;
     }
-}
 
-Graph::Graph()
-    : _pimpl{ new Private() }
-{
-    graph_init();
-}
+    std::unique_ptr<INode> &node = _nodes[nid];
 
-void Graph::graph_init(const bool use_cl_tuner)
-{
-    // Check if OpenCL is available and initialize the scheduler
-    if(opencl_is_available())
+    // Remove node connections
+    if(node)
     {
-        if(_pimpl->_tuner.lws_table().empty() && file_exists(tuner_data_filename))
+        for(auto &input_eid : node->_input_edges)
         {
-            _pimpl->_tuner.load_from_file(tuner_data_filename);
+            remove_connection(input_eid);
         }
-        _pimpl->_tuner.set_tune_new_kernels(use_cl_tuner);
-        arm_compute::CLScheduler::get().default_init(&_pimpl->_tuner);
-    }
-}
-void Graph::run()
-{
-    while(true)
-    {
-        if(_pimpl->_graph_input->has_accessor() && !_pimpl->_graph_input->call_accessor())
+        for(auto &output_eid : node->_output_edges)
        {
-            return;
-        }
-
-        for(auto &stage : _pimpl->_pipeline)
-        {
-            stage._function->run();
-        }
-
-        if((_pimpl->_graph_output->has_accessor() && !_pimpl->_graph_output->call_accessor())
-           || (!_pimpl->_graph_output->has_accessor()))
-        {
-            return;
-        }
-    }
-}
-
-//Finalize current node's configuration
-void Graph::Private::configure(GraphHints _next_hints)
-{
-    ARM_COMPUTE_ERROR_ON(_current_node == nullptr);
-    ARM_COMPUTE_ERROR_ON(_graph_input == nullptr);
-
-    // Is it the first node of the graph ?
-    if(_current_input == nullptr)
-    {
-        _graph_input->set_target(_current_hints.target_hint());
-        _current_input  = _graph_input.get();
-        _previous_hints = _current_hints; // For the first node just assume the previous node was of the same type as this one
-    }
-
-    if(_current_node->supports_in_place())
-    {
-        _current_output = _current_input;
-    }
-
-    //Automatic output configuration ?
-    if(_current_output == nullptr)
-    {
-        _tensors.push_back(arm_compute::support::cpp14::make_unique<Tensor>(TensorInfo()));
-        _current_output = _tensors.back().get();
-    }
-
-    // If either the writer or reader node needs OpenCL then use OpenCL memory:
-    if((_next_hints.target_hint() == TargetHint::OPENCL || _current_hints.target_hint() == TargetHint::OPENCL))
-    {
-        _current_output->set_target(TargetHint::OPENCL);
-    }
-    else
-    {
-        _current_output->set_target(TargetHint::NEON);
-    }
-
-    // Instantiate Node
-    _ctx.hints()                                 = _current_hints;
-    std::unique_ptr<arm_compute::IFunction> func = _current_node->instantiate_node(_ctx, _current_input, _current_output);
-
-    // If the operation is done in-place, do not allocate or it will prevent following layers from performing the configuration
-    if(!_current_node->supports_in_place())
-    {
-        // Allocate current input
-        _current_input->allocate();
-    }
-
-    // Map input if needed
-    if(_current_input->target() == TargetHint::OPENCL)
-    {
-        if(_previous_hints.target_hint() == TargetHint::NEON)
-        {
-            ARM_COMPUTE_ERROR_ON(_current_hints.target_hint() == TargetHint::NEON);
-            _pipeline.push_back({ _current_input, _current_input, arm_compute::support::cpp14::make_unique<CLUnmap>(_current_input) });
-        }
-        if(_current_hints.target_hint() == TargetHint::NEON)
-        {
-            ARM_COMPUTE_ERROR_ON(_previous_hints.target_hint() == TargetHint::NEON);
-            _pipeline.push_back({ _current_input, _current_input, arm_compute::support::cpp14::make_unique<CLMap>(_current_input, true) });
+            remove_connection(output_eid);
         }
     }
 
-    _pipeline.push_back({ _current_input, _current_output, std::move(func) });
+    node = nullptr;
 
-    _current_input  = _current_output;
-    _current_output = nullptr;
-    std::swap(_previous_hints, _current_hints);
-    std::swap(_current_hints, _next_hints);
+    return true;
 }
 
-void Graph::add_node(std::unique_ptr<INode> node)
+EdgeID Graph::add_connection(NodeID source, size_t source_idx, NodeID sink, size_t sink_idx)
 {
-    ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_graph_input == nullptr, "The graph's input must be set before the first node is added");
-    ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_graph_output != nullptr, "Nothing can be added after the output tensor");
-    //Trigger the creation of the current Node:
+    std::lock_guard<arm_compute::Mutex> lock(_mtx);
 
-    GraphHints _next_hints = _pimpl->_next_hints;
-    _next_hints.set_target_hint(node->override_target_hint(_pimpl->_next_hints.target_hint()));
-    ARM_COMPUTE_ERROR_ON(_next_hints.target_hint() == TargetHint::DONT_CARE);
-    if(_pimpl->_current_node)
+    // Check if node index is valid, if node exists and finally if the connection index is valid
+    ARM_COMPUTE_ERROR_ON((source >= _nodes.size()) || (_nodes[source] == nullptr) || (source_idx >= _nodes[source]->num_outputs()));
+    ARM_COMPUTE_ERROR_ON((sink >= _nodes.size()) || (_nodes[sink] == nullptr) || (sink_idx >= _nodes[sink]->num_inputs()));
+
+    // Get nodes
+    std::unique_ptr<INode> &source_node = _nodes[source];
+    std::unique_ptr<INode> &sink_node   = _nodes[sink];
+
+    // Check for duplicate connections (Check only sink node)
+    Edge *sink_node_edge = sink_node->input_edge(sink_idx);
+    if((sink_node_edge != nullptr) && (sink_node_edge->producer_id() == source) && (sink_node_edge->producer_idx() == source_idx)
+       && (sink_node_edge->consumer_id() == sink) && (sink_node_edge->consumer_idx() == sink_idx))
     {
-        //Finalize the previous Node:
-        _pimpl->configure(_pimpl->_next_hints);
+        return sink_node_edge->id();
     }
-    else
+
+    // Check if there is already a tensor associated with output if not create one
+    TensorID tid = source_node->output_id(source_idx);
+    if(tid == NullTensorID)
     {
-        // If that's the first node then use the same TargetHint before and after the node.
-        _pimpl->_current_hints = _next_hints;
+        tid = create_tensor();
     }
-    if(_pimpl->_current_node)
+    std::unique_ptr<Tensor> &tensor = _tensors[tid];
+
+    // Create connections
+    EdgeID eid        = _edges.size();
+    auto   connection = arm_compute::support::cpp14::make_unique<Edge>(eid, source_node.get(), source_idx, sink_node.get(), sink_idx, tensor.get());
+    _edges.push_back(std::move(connection));
+
+    // Add connections to source and sink nodes
+    source_node->_output_edges.insert(eid);
+    sink_node->_input_edges[sink_idx] = eid;
+
+    // Set tensor output node
+    source_node->_outputs[source_idx] = tid;
+
+    // Bind tensor to the edge
+    tensor->bind_edge(eid);
+
+    // Try and propagate shapes in sink node
+    sink_node->forward_descriptors();
+
+    return eid;
+}
+
+bool Graph::remove_connection(EdgeID eid)
+{
+    if(eid >= _edges.size())
     {
-        _pimpl->_nodes.push_back(std::move(_pimpl->_current_node));
+        return false;
     }
-    _pimpl->_current_node = std::move(node);
-}
 
-//Add a tensor with an Accessor (i.e either the input or output of the graph)
-void Graph::add_tensor_object(std::unique_ptr<ITensorObject> tensor)
-{
-    // If it's the first Tensor added then it will be the input of the Graph.
-    if(_pimpl->_graph_input == nullptr)
+    std::unique_ptr<Edge> &edge = _edges[eid];
+
+    // Remove node connections
+    if(edge != nullptr)
     {
-        ARM_COMPUTE_ERROR_ON(_pimpl->_graph_output != nullptr);
-        ARM_COMPUTE_ERROR_ON(_pimpl->_current_node != nullptr);
-        _pimpl->_graph_input = std::move(tensor);
+        // Get tensor bound to the edge
+        if(edge->tensor() != nullptr)
+        {
+            edge->tensor()->unbind_edge(eid);
+        }
+
+        // Remove edges from source node
+        if(edge->producer() != nullptr)
+        {
+            edge->producer()->_output_edges.erase(eid);
+        }
+
+        // Remove edges from sink node
+        if((edge->consumer() != nullptr) && (edge->consumer_idx() < edge->consumer()->_input_edges.size()))
+        {
+            edge->consumer()->_input_edges[edge->consumer_idx()] = EmptyEdgeID;
+        }
     }
-    else
-    {
-        // Else it will be the output of the Graph
-        ARM_COMPUTE_ERROR_ON(_pimpl->_graph_output != nullptr);
-        ARM_COMPUTE_ERROR_ON(_pimpl->_current_node == nullptr);
-        _pimpl->_graph_output   = std::move(tensor);
-        _pimpl->_current_output = _pimpl->_graph_output.get();
 
-        // Finalize the graph by configuring the last Node of the graph:
-        _pimpl->configure(_pimpl->_current_hints); // Ignore _next_hint as this is the last node, and just use the same hint as before this node.
-        _pimpl->_graph_output->allocate();
-    }
+    // Clear edge
+    edge = nullptr;
+
+    return true;
 }
 
-bool Graph::opencl_is_available()
+TensorID Graph::create_tensor(TensorDescriptor desc)
 {
-    return arm_compute::opencl_is_available();
+    TensorID tid    = _tensors.size();
+    auto     tensor = support::cpp14::make_unique<Tensor>(tid, desc);
+    _tensors.push_back(std::move(tensor));
+
+    return tid;
 }
 
-arm_compute::GPUTarget Graph::gpu_target()
+std::string Graph::name() const
 {
-    // Check if OpenCL is available before returning the GPU target
-    if(opencl_is_available())
-    {
-        return arm_compute::CLScheduler::get().target();
-    }
-    else
-    {
-        return GPUTarget::MIDGARD;
-    }
+    return _name;
 }
 
-void Graph::set_temp(TensorInfo &&tmp)
+GraphID Graph::id() const
 {
-    ARM_COMPUTE_ERROR_ON(_pimpl->_graph_input == nullptr);
-    ARM_COMPUTE_ERROR_ON(_pimpl->_graph_output != nullptr);
-    ARM_COMPUTE_ERROR_ON_MSG(_pimpl->_current_output != nullptr, "TensorInfo for temporary tensor already set");
-
-    _pimpl->_tensors.push_back(arm_compute::support::cpp14::make_unique<Tensor>(std::move(tmp)));
-    _pimpl->_current_output = _pimpl->_tensors.back().get();
+    return _id;
 }
 
-GraphHints &Graph::hints()
+const std::vector<NodeID> &Graph::inputs()
 {
-    return _pimpl->_next_hints;
+    return _tagged_nodes[NodeType::Input];
 }
 
-Graph &arm_compute::graph::operator<<(Graph &graph, TensorInfo &&info)
+std::vector<std::unique_ptr<INode>> &Graph::nodes()
 {
-    graph.set_temp(std::move(info));
-    return graph;
+    return _nodes;
 }
 
-Graph &arm_compute::graph::operator<<(Graph &graph, Tensor &&tensor)
+const std::vector<std::unique_ptr<INode>> &Graph::nodes() const
 {
-    graph.add_tensor_object(arm_compute::support::cpp14::make_unique<Tensor>(std::move(tensor)));
-    return graph;
+    return _nodes;
 }
 
-Graph &arm_compute::graph::operator<<(Graph &graph, SubTensor &&sub_tensor)
+const std::vector<std::unique_ptr<Edge>> &Graph::edges() const
 {
-    graph.add_tensor_object(arm_compute::support::cpp14::make_unique<SubTensor>(std::move(sub_tensor)));
-    return graph;
+    return _edges;
 }
 
-Graph &arm_compute::graph::operator<<(Graph &graph, TargetHint target_hint)
+std::vector<std::unique_ptr<Tensor>> &Graph::tensors()
 {
-    graph.hints().set_target_hint(target_hint);
-    return graph;
+    return _tensors;
 }
 
-Graph &arm_compute::graph::operator<<(Graph &graph, ConvolutionMethodHint conv_method_hint)
+const std::vector<std::unique_ptr<Tensor>> &Graph::tensors() const
 {
-    graph.hints().set_convolution_method_hint(conv_method_hint);
-    return graph;
+    return _tensors;
 }
+
+const INode *Graph::node(NodeID id) const
+{
+    return (id >= _nodes.size()) ? nullptr : _nodes[id].get();
+}
+
+INode *Graph::node(NodeID id)
+{
+    return (id >= _nodes.size()) ? nullptr : _nodes[id].get();
+}
+
+const Edge *Graph::edge(EdgeID id) const
+{
+    return (id >= _edges.size()) ? nullptr : _edges[id].get();
+}
+
+Edge *Graph::edge(EdgeID id)
+{
+    return (id >= _edges.size()) ? nullptr : _edges[id].get();
+}
+
+const Tensor *Graph::tensor(TensorID id) const
+{
+    return (id >= _tensors.size()) ? nullptr : _tensors[id].get();
+}
+
+Tensor *Graph::tensor(TensorID id)
+{
+    return (id >= _tensors.size()) ? nullptr : _tensors[id].get();
+}
+} // namespace graph
+} // namespace arm_compute
diff --git a/src/graph2/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp
similarity index 98%
rename from src/graph2/GraphBuilder.cpp
rename to src/graph/GraphBuilder.cpp
index e6fc2af..0d1bdc3 100644
--- a/src/graph2/GraphBuilder.cpp
+++ b/src/graph/GraphBuilder.cpp
@@ -21,19 +21,19 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/GraphBuilder.h"
+#include "arm_compute/graph/GraphBuilder.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Utils.h"
-#include "arm_compute/graph2/algorithms/BFS.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Utils.h"
+#include "arm_compute/graph/algorithms/BFS.h"
+#include "arm_compute/graph/nodes/Nodes.h"
 
 #define CHECK_NODEIDX_PAIR(pair, g) \
     ARM_COMPUTE_ERROR_ON(((pair).node_id >= (g).nodes().size()) || ((g).node((pair).node_id) == nullptr) || ((pair).index >= (g).node((pair).node_id)->num_outputs()));
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace
 {
@@ -390,5 +390,5 @@
 {
     return create_simple_single_input_output_node<SplitLayerNode>(g, params, input, num_splits, axis);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/GraphContext.cpp b/src/graph/GraphContext.cpp
index bfc6fcd..6fc45c0 100644
--- a/src/graph/GraphContext.cpp
+++ b/src/graph/GraphContext.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,45 +22,53 @@
  * SOFTWARE.
  */
 #include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph.h"
 
-using namespace arm_compute::graph;
-
-GraphHints::GraphHints(TargetHint target_hint, ConvolutionMethodHint conv_method_hint)
-    : _target_hint(target_hint), _convolution_method_hint(conv_method_hint)
+namespace arm_compute
 {
-}
-
-void GraphHints::set_target_hint(TargetHint target_hint)
+namespace graph
 {
-    _target_hint = target_hint;
-}
-
-void GraphHints::set_convolution_method_hint(ConvolutionMethodHint convolution_method)
-{
-    _convolution_method_hint = convolution_method;
-}
-
-TargetHint GraphHints::target_hint() const
-{
-    return _target_hint;
-}
-
-ConvolutionMethodHint GraphHints::convolution_method_hint() const
-{
-    return _convolution_method_hint;
-}
-
 GraphContext::GraphContext()
-    : _hints()
+    : _config(), _memory_managers()
 {
 }
 
-GraphHints &GraphContext::hints()
+const GraphConfig &GraphContext::config() const
 {
-    return _hints;
+    return _config;
 }
 
-const GraphHints &GraphContext::hints() const
+void GraphContext::set_config(const GraphConfig &config)
 {
-    return _hints;
-}
\ No newline at end of file
+    _config = config;
+}
+
+bool GraphContext::insert_memory_management_ctx(MemoryManagerContext &&memory_ctx)
+{
+    Target target = memory_ctx.target;
+    if(target == Target::UNSPECIFIED || _memory_managers.find(target) != std::end(_memory_managers))
+    {
+        return false;
+    }
+
+    _memory_managers[target] = std::move(memory_ctx);
+    return true;
+}
+
+MemoryManagerContext *GraphContext::memory_management_ctx(Target target)
+{
+    return (_memory_managers.find(target) != std::end(_memory_managers)) ? &_memory_managers[target] : nullptr;
+}
+
+void GraphContext::finalize()
+{
+    for(auto &mm_obj : _memory_managers)
+    {
+        if(mm_obj.second.mm != nullptr)
+        {
+            mm_obj.second.mm->finalize();
+        }
+    }
+}
+} // namespace graph
+} // namespace arm_compute
diff --git a/src/graph2/GraphManager.cpp b/src/graph/GraphManager.cpp
similarity index 91%
rename from src/graph2/GraphManager.cpp
rename to src/graph/GraphManager.cpp
index a51ba61..759300e 100644
--- a/src/graph2/GraphManager.cpp
+++ b/src/graph/GraphManager.cpp
@@ -21,18 +21,18 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/GraphManager.h"
+#include "arm_compute/graph/GraphManager.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/PassManager.h"
-#include "arm_compute/graph2/Utils.h"
-#include "arm_compute/graph2/detail/ExecutionHelpers.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/PassManager.h"
+#include "arm_compute/graph/Utils.h"
+#include "arm_compute/graph/detail/ExecutionHelpers.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 GraphManager::GraphManager()
     : _workloads()
@@ -113,5 +113,5 @@
 
     _workloads.erase(it);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/INode.cpp b/src/graph/INode.cpp
index c753f66..c1c18e5 100644
--- a/src/graph/INode.cpp
+++ b/src/graph/INode.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,33 +23,171 @@
  */
 #include "arm_compute/graph/INode.h"
 
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/graph/Edge.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Tensor.h"
 
-using namespace arm_compute::graph;
-
-TargetHint INode::override_target_hint(TargetHint target_hint) const
+namespace arm_compute
 {
-    if(target_hint == TargetHint::OPENCL && !opencl_is_available())
+namespace graph
+{
+// *INDENT-OFF*
+// clang-format off
+INode::INode()
+    : _graph(nullptr), _id(EmptyNodeID), _common_params({ "", Target::UNSPECIFIED}),
+      _outputs(), _input_edges(), _output_edges(), _assigned_target(Target::UNSPECIFIED)
+{
+}
+// clang-format on
+// *INDENT-ON*
+
+void INode::set_graph(Graph *g)
+{
+    ARM_COMPUTE_ERROR_ON(g == nullptr);
+    _graph = g;
+}
+
+void INode::set_id(NodeID id)
+{
+    _id = id;
+}
+
+void INode::set_common_node_parameters(NodeParams common_params)
+{
+    _common_params = std::move(common_params);
+}
+
+void INode::set_requested_target(Target target)
+{
+    _common_params.target = target;
+}
+
+void INode::set_assigned_target(Target target)
+{
+    _assigned_target = target;
+}
+
+void INode::set_output_tensor(TensorID tid, size_t idx)
+{
+    ARM_COMPUTE_ERROR_ON(_graph == nullptr);
+    if(tid != NullTensorID && (idx < _outputs.size()) && (_graph->tensor(tid) != nullptr))
    {
-        target_hint = TargetHint::DONT_CARE;
+        Tensor *updated_tensor = _graph->tensor(tid);
+        _outputs[idx]          = tid;
+
+        // Set tensor to all output edges of the node
+        for(auto &output_edge_id : _output_edges)
+        {
+            auto output_edge = _graph->edge(output_edge_id);
+            if(output_edge != nullptr)
+            {
+                // Unbind edge from current tensor
+                auto current_output_tensor = output_edge->tensor();
+                current_output_tensor->unbind_edge(output_edge->id());
+
+                // Update tensor to edge and rebind tensor
+                output_edge->update_bound_tensor(updated_tensor);
+                updated_tensor->bind_edge(output_edge->id());
+            }
+        }
     }
-    GraphHints hints{ target_hint };
-    target_hint = node_override_hints(hints).target_hint();
-    ARM_COMPUTE_ERROR_ON(target_hint == TargetHint::OPENCL && !opencl_is_available());
-    return target_hint;
 }
-bool INode::supports_in_place() const
+
+NodeID INode::id() const
 {
-    return _supports_in_place;
+    return _id;
 }
-void INode::set_supports_in_place(bool value)
+
+std::string INode::name() const
 {
-    _supports_in_place = value;
+    return _common_params.name;
 }
-GraphHints INode::node_override_hints(GraphHints hints) const
+
+const Graph *INode::graph() const
 {
-    TargetHint target_hint = hints.target_hint();
-    hints.set_target_hint((target_hint == TargetHint::DONT_CARE) ? TargetHint::NEON : target_hint);
-    return hints;
+    return _graph;
 }
+
+Graph *INode::graph()
+{
+    return _graph;
+}
+
+const std::vector<TensorID> &INode::outputs() const
+{
+    return _outputs;
+}
+
+const std::vector<EdgeID> &INode::input_edges() const
+{
+    return _input_edges;
+}
+
+const std::set<EdgeID> &INode::output_edges() const
+{
+    return _output_edges;
+}
+
+TensorID INode::input_id(size_t idx) const
+{
+    ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
+    Edge *e = _graph->edge(_input_edges[idx]);
+    return (e != nullptr) ? e->tensor_id() : NullTensorID;
+}
+
+TensorID INode::output_id(size_t idx) const
+{
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+    return _outputs[idx];
+}
+
+Tensor *INode::input(size_t idx) const
+{
+    ARM_COMPUTE_ERROR_ON(_graph == nullptr);
+    ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
+    Edge *e = _graph->edge(_input_edges[idx]);
+    return (e != nullptr) ? e->tensor() : nullptr;
+}
+
+Tensor *INode::output(size_t idx) const
+{
+    ARM_COMPUTE_ERROR_ON(_graph == nullptr);
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+    return _graph->tensor(_outputs[idx]);
+}
+
+EdgeID INode::input_edge_id(size_t idx) const
+{
+    ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
+    return _input_edges[idx];
+}
+
+Edge *INode::input_edge(size_t idx) const
+{
+    ARM_COMPUTE_ERROR_ON(_graph == nullptr);
+    ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
+    return _graph->edge(_input_edges[idx]);
+}
+
+size_t INode::num_inputs() const
+{
+    return _input_edges.size();
+}
+
+size_t INode::num_outputs() const
+{
+    return _outputs.size();
+}
+
+Target INode::requested_target() const
+{
+    return _common_params.target;
+}
+
+Target INode::assigned_target() const
+{
+    return _assigned_target;
+}
+} // namespace graph
+} // namespace arm_compute
diff --git a/src/graph/NodeContext.cpp b/src/graph/NodeContext.cpp
deleted file mode 100644
index 2aa5aa1..0000000
--- a/src/graph/NodeContext.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/NodeContext.h"
-
-using namespace arm_compute::graph;
-
-void NodeContext::set_target(TargetHint target)
-{
-    _target = target;
-}
-
-void NodeContext::add_input(arm_compute::ITensor *input)
-{
-    ARM_COMPUTE_ERROR_ON(input == nullptr);
-    _inputs.emplace_back(input);
-}
-
-void NodeContext::add_output(arm_compute::ITensor *output)
-{
-    ARM_COMPUTE_ERROR_ON(output == nullptr);
-    _outputs.emplace_back(output);
-}
-
-OperationType NodeContext::operation() const
-{
-    return _operation;
-}
-
-TargetHint NodeContext::target() const
-{
-    return _target;
-}
-
-arm_compute::ITensor *NodeContext::input(size_t idx) const
-{
-    ARM_COMPUTE_ERROR_ON(idx >= _inputs.size());
-    return _inputs[idx];
-}
-
-arm_compute::ITensor *NodeContext::output(size_t idx) const
-{
-    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
-    return _outputs[idx];
-}
-
-size_t NodeContext::num_inputs() const
-{
-    return _inputs.size();
-}
-
-size_t NodeContext::num_outputs() const
-{
-    return _outputs.size();
-}
\ No newline at end of file
diff --git a/src/graph/OperationRegistry.cpp b/src/graph/OperationRegistry.cpp
deleted file mode 100644
index 651653f..0000000
--- a/src/graph/OperationRegistry.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/OperationRegistry.h"
-
-using namespace arm_compute::graph;
-
-OperationRegistry::OperationRegistry()
-    : _registered_ops()
-{
-}
-
-OperationRegistry &OperationRegistry::get()
-{
-    static OperationRegistry instance;
-    return instance;
-}
-
-IOperation *OperationRegistry::find_operation(OperationType operation, TargetHint target)
-{
-    ARM_COMPUTE_ERROR_ON(!contains(operation, target));
-    auto it = std::find_if(_registered_ops[operation].begin(), _registered_ops[operation].end(), [&](const std::unique_ptr<IOperation> &op)
-    {
-        return (op->target() == target);
-    });
-    ARM_COMPUTE_ERROR_ON(it == _registered_ops[operation].end());
-    return (*it).get();
-}
-
-bool OperationRegistry::contains(OperationType operation, TargetHint target) const
-{
-    auto it = _registered_ops.find(operation);
-    if(it != _registered_ops.end())
-    {
-        return std::any_of(it->second.begin(), it->second.end(), [&](const std::unique_ptr<IOperation> &op)
-        {
-            return (op->target() == target);
-        });
-    }
-    return false;
-}
diff --git a/src/graph2/PassManager.cpp b/src/graph/PassManager.cpp
similarity index 94%
rename from src/graph2/PassManager.cpp
rename to src/graph/PassManager.cpp
index 2fa937b..8ed68bd 100644
--- a/src/graph2/PassManager.cpp
+++ b/src/graph/PassManager.cpp
@@ -21,13 +21,13 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/PassManager.h"
+#include "arm_compute/graph/PassManager.h"
 
-#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph/Logger.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 PassManager::PassManager()
     : _passes()
@@ -84,5 +84,5 @@
         pass->mutate(g);
     }
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/SubGraph.cpp b/src/graph/SubGraph.cpp
deleted file mode 100644
index b1cbb9c..0000000
--- a/src/graph/SubGraph.cpp
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/SubGraph.h"
-
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/INode.h"
-#include "arm_compute/graph/Tensor.h"
-
-using namespace arm_compute::graph;
-
-SubGraph::SubGraph()
-    : _nodes(), _input(nullptr), _output(nullptr)
-{
-}
-
-void SubGraph::add_node(std::unique_ptr<INode> node)
-{
-    _nodes.push_back(std::move(node));
-}
-
-void SubGraph::add_tensor_object(std::unique_ptr<ITensorObject> tensor)
-{
-    // If it's the first Tensor added then it will be the input of the Graph.
-    if(_input == nullptr)
-    {
-        _input = std::move(tensor);
-    }
-    else
-    {
-        _output = std::move(tensor);
-    }
-}
-
-std::unique_ptr<Graph> SubGraph::construct(const GraphContext &ctx, std::unique_ptr<ITensorObject> input, std::unique_ptr<ITensorObject> output)
-{
-    auto graph = arm_compute::support::cpp14::make_unique<Graph>();
-
-    // Set hint
-    // TODO(geopin01): store hints of sub-graph
-    graph->hints() = ctx.hints();
-
-    // Configure input
-    if(_input == nullptr)
-    {
-        _input = std::move(input);
-    }
-    graph->add_tensor_object(std::move(_input));
-
-    // Make sure first and last nodes of the subgraph always do operations out-of-place
-    _nodes.front()->set_supports_in_place(false);
-    _nodes.back()->set_supports_in_place(false);
-
-    // Construct nodes
-    for(auto &node : _nodes)
-    {
-        graph->add_node(std::move(node));
-    }
-
-    // Configure output
-    if(_output == nullptr)
-    {
-        _output = std::move(output);
-    }
-    graph->add_tensor_object(std::move(_output));
-
-    return graph;
-}
-
-bool SubGraph::has_input() const
-{
-    return _input != nullptr;
-}
-
-bool SubGraph::has_output() const
-{
-    return _output != nullptr;
-}
-
-SubGraph &arm_compute::graph::operator<<(SubGraph &graph, Tensor &&tensor)
-{
-    graph.add_tensor_object(arm_compute::support::cpp14::make_unique<Tensor>(std::move(tensor)));
-    return graph;
-}
-
-SubGraph &arm_compute::graph::operator<<(SubGraph &graph, SubTensor &&sub_tensor)
-{
-    graph.add_tensor_object(arm_compute::support::cpp14::make_unique<SubTensor>(std::move(sub_tensor)));
-    return graph;
-}
diff --git a/src/graph/SubTensor.cpp b/src/graph/SubTensor.cpp
deleted file mode 100644
index 2e640dd..0000000
--- a/src/graph/SubTensor.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/SubTensor.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/runtime/CL/CLSubTensor.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/SubTensor.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "utils/TypePrinter.h"
-
-using namespace arm_compute::graph;
-
-namespace
-{
-template <typename SubTensorType, typename ParentTensorType>
-std::unique_ptr<arm_compute::ITensor> initialise_subtensor(arm_compute::ITensor *parent, TensorShape shape, Coordinates coords, bool extend_parent)
-{
-    auto ptensor   = dynamic_cast<ParentTensorType *>(parent);
-    auto subtensor = arm_compute::support::cpp14::make_unique<SubTensorType>(ptensor, shape, coords, extend_parent);
-    return std::move(subtensor);
-}
-} // namespace
-
-SubTensor::SubTensor()
-    : _target(TargetHint::DONT_CARE), _tensor_shape(), _coords(), _parent(nullptr), _subtensor(nullptr), _extend_parent(false)
-{
-}
-
-SubTensor::SubTensor(Tensor &parent, TensorShape tensor_shape, Coordinates coords, bool extend_parent)
-    : _target(TargetHint::DONT_CARE), _tensor_shape(tensor_shape), _coords(coords), _parent(nullptr), _subtensor(nullptr), _extend_parent(extend_parent)
-{
-    ARM_COMPUTE_ERROR_ON(parent.tensor() == nullptr);
-    _parent = parent.tensor();
-    _target = parent.target();
-
-    instantiate_subtensor();
-}
-
-SubTensor::SubTensor(arm_compute::ITensor *parent, TensorShape tensor_shape, Coordinates coords, TargetHint target, bool extend_parent)
-    : _target(target), _tensor_shape(tensor_shape), _coords(coords), _parent(parent), _subtensor(nullptr), _extend_parent(extend_parent)
-{
-    ARM_COMPUTE_ERROR_ON(parent == nullptr);
-    instantiate_subtensor();
-}
-
-bool SubTensor::call_accessor()
-{
-    return true;
-}
-
-bool SubTensor::has_accessor() const
-{
-    return false;
-}
-
-arm_compute::ITensor *SubTensor::set_target(TargetHint target)
-{
-    ARM_COMPUTE_ERROR_ON(target != _target);
-    return (target == _target) ? _subtensor.get() : nullptr;
-}
-
-arm_compute::ITensor *SubTensor::tensor()
-{
-    return _subtensor.get();
-}
-
-const arm_compute::ITensor *SubTensor::tensor() const
-{
-    return _subtensor.get();
-}
-
-TargetHint SubTensor::target() const
-{
-    return _target;
-}
-
-void SubTensor::allocate()
-{
-    // NOP for sub-tensors
-}
-
-void SubTensor::instantiate_subtensor()
-{
-    switch(_target)
-    {
-        case TargetHint::OPENCL:
-            _subtensor = initialise_subtensor<arm_compute::CLSubTensor, arm_compute::ICLTensor>(_parent, _tensor_shape, _coords, _extend_parent);
-            break;
-        case TargetHint::NEON:
-            _subtensor = initialise_subtensor<arm_compute::SubTensor, arm_compute::ITensor>(_parent, _tensor_shape, _coords, _extend_parent);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Invalid TargetHint");
-    }
-}
diff --git a/src/graph/Tensor.cpp b/src/graph/Tensor.cpp
index 4db79e9..47fb5c6 100644
--- a/src/graph/Tensor.cpp
+++ b/src/graph/Tensor.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,138 +23,89 @@
  */
 #include "arm_compute/graph/Tensor.h"
 
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "utils/TypePrinter.h"
-
-using namespace arm_compute::graph;
-
-namespace
+namespace arm_compute
 {
-template <typename TensorType>
-std::unique_ptr<arm_compute::ITensor> initialise_tensor(TensorInfo &info)
+namespace graph
 {
-    auto tensor = arm_compute::support::cpp14::make_unique<TensorType>();
-    tensor->allocator()->init(info);
-    return std::move(tensor);
-}
-
-template <typename TensorType>
-void tensor_allocate(arm_compute::ITensor &tensor)
-{
-    auto itensor = dynamic_cast<TensorType *>(&tensor);
-    ARM_COMPUTE_ERROR_ON_NULLPTR(itensor);
-    itensor->allocator()->allocate();
-}
-} // namespace
-
-Tensor::Tensor(TensorInfo &&info)
-    : _target(TargetHint::DONT_CARE), _info(info), _accessor(nullptr), _tensor(nullptr)
+Tensor::Tensor(TensorID id, TensorDescriptor desc)
+    : _id(id), _desc(desc), _handle(nullptr), _accessor(nullptr), _bound_edges()
 {
 }
 
-Tensor::Tensor(Tensor &&src) noexcept
-    : _target(src._target),
-      _info(std::move(src._info)),
-      _accessor(std::move(src._accessor)),
-      _tensor(std::move(src._tensor))
+TensorID Tensor::id() const
 {
+    return _id;
 }
 
-void Tensor::set_info(TensorInfo &&info)
+TensorDescriptor &Tensor::desc()
 {
-    _info = info;
+    return _desc;
+}
+
+const TensorDescriptor &Tensor::desc() const
+{
+    return _desc;
+}
+
+void Tensor::set_handle(std::unique_ptr<ITensorHandle> backend_tensor)
+{
+    _handle = std::move(backend_tensor);
+}
+
+ITensorHandle *Tensor::handle()
+{
+    return _handle.get();
+}
+
+void Tensor::set_accessor(std::unique_ptr<ITensorAccessor> accessor)
+{
+    _accessor = std::move(accessor);
+}
+
+ITensorAccessor *Tensor::accessor()
+{
+    return _accessor.get();
 }
 
 bool Tensor::call_accessor()
 {
-    ARM_COMPUTE_ERROR_ON_NULLPTR(_accessor.get());
-    auto cl_tensor = dynamic_cast<arm_compute::CLTensor *>(_tensor.get());
-    if(cl_tensor != nullptr && cl_tensor->buffer() == nullptr)
+    // Early exit guard
+    if(!_accessor || !_handle)
     {
-        cl_tensor->map();
+        return false;
     }
-    bool retval = _accessor->access_tensor(*_tensor);
-    if(cl_tensor != nullptr)
+
+    // Map tensor
+    _handle->map(true);
+
+    // Return in case of null backend buffer
+    if(_handle->tensor().buffer() == nullptr)
     {
-        cl_tensor->unmap();
+        return false;
     }
-    return retval;
+
+    // Call accessor
+    _accessor->access_tensor(_handle->tensor());
+
+    // Unmap tensor
+    _handle->unmap();
+
+    return true;
 }
 
-bool Tensor::has_accessor() const
+void Tensor::bind_edge(EdgeID eid)
 {
-    return (_accessor != nullptr);
+    _bound_edges.insert(eid);
 }
 
-arm_compute::ITensor *Tensor::tensor()
+void Tensor::unbind_edge(EdgeID eid)
 {
-    return _tensor.get();
+    _bound_edges.erase(eid);
 }
 
-const arm_compute::ITensor *Tensor::tensor() const
+const std::set<EdgeID> Tensor::bound_edges() const
 {
-    return _tensor.get();
+    return _bound_edges;
 }
-
-const TensorInfo &Tensor::info() const
-{
-    return _info;
-}
-
-arm_compute::ITensor *Tensor::set_target(TargetHint target)
-{
-    if(_tensor != nullptr)
-    {
-        ARM_COMPUTE_ERROR_ON(target != _target);
-    }
-    else
-    {
-        switch(target)
-        {
-            case TargetHint::OPENCL:
-                _tensor = initialise_tensor<arm_compute::CLTensor>(_info);
-                break;
-            case TargetHint::NEON:
-                _tensor = initialise_tensor<arm_compute::Tensor>(_info);
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Invalid TargetHint");
-        }
-        _target = target;
-    }
-    return _tensor.get();
-}
-
-void Tensor::allocate()
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(_tensor.get());
-    switch(_target)
-    {
-        case TargetHint::OPENCL:
-            tensor_allocate<arm_compute::CLTensor>(*_tensor);
-            break;
-        case TargetHint::NEON:
-            tensor_allocate<arm_compute::Tensor>(*_tensor);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Invalid TargetHint");
-    }
-}
-
-void Tensor::allocate_and_fill_if_needed()
-{
-    allocate();
-    if(_accessor != nullptr)
-    {
-        call_accessor();
-    }
-}
-
-TargetHint Tensor::target() const
-{
-    return _target;
-}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/Utils.cpp b/src/graph/Utils.cpp
similarity index 87%
rename from src/graph2/Utils.cpp
rename to src/graph/Utils.cpp
index a3e90f4..8537bbf 100644
--- a/src/graph2/Utils.cpp
+++ b/src/graph/Utils.cpp
@@ -21,19 +21,19 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/Utils.h"
+#include "arm_compute/graph/Utils.h"
 
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/backends/BackendRegistry.h"
-#include "arm_compute/graph2/mutators/GraphMutators.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
+#include "arm_compute/graph/mutators/GraphMutators.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 bool is_target_supported(Target target)
 {
-    return backends::BackendRegistry::get().contains(target);
+    return backends::BackendRegistry::get().contains(target) && backends::BackendRegistry::get().find_backend(target)->is_backend_supported();
 }
 
 Target get_default_target()
@@ -100,5 +100,5 @@
         backend.second->setup_backend_context(ctx);
     }
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/Workload.cpp b/src/graph/Workload.cpp
similarity index 88%
rename from src/graph2/Workload.cpp
rename to src/graph/Workload.cpp
index 3fd36fa..c53a8a4 100644
--- a/src/graph2/Workload.cpp
+++ b/src/graph/Workload.cpp
@@ -21,14 +21,14 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/Workload.h"
+#include "arm_compute/graph/Workload.h"
 
-#include "arm_compute/graph2/INode.h"
-#include "arm_compute/graph2/ITensorHandle.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/ITensorHandle.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 void ExecutionTask::operator()()
 {
@@ -37,5 +37,5 @@
         task->run();
     }
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/BackendRegistry.cpp b/src/graph/backends/BackendRegistry.cpp
similarity index 92%
rename from src/graph2/backends/BackendRegistry.cpp
rename to src/graph/backends/BackendRegistry.cpp
index 5f1218f..2803322 100644
--- a/src/graph2/backends/BackendRegistry.cpp
+++ b/src/graph/backends/BackendRegistry.cpp
@@ -21,13 +21,13 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/BackendRegistry.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
 
-using namespace arm_compute::graph2::backends;
+using namespace arm_compute::graph::backends;
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -59,5 +59,5 @@
     return _registered_backends;
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
diff --git a/src/graph2/backends/CL/CLDeviceBackend.cpp b/src/graph/backends/CL/CLDeviceBackend.cpp
similarity index 88%
rename from src/graph2/backends/CL/CLDeviceBackend.cpp
rename to src/graph/backends/CL/CLDeviceBackend.cpp
index 71566d2..f10eb33 100644
--- a/src/graph2/backends/CL/CLDeviceBackend.cpp
+++ b/src/graph/backends/CL/CLDeviceBackend.cpp
@@ -21,18 +21,18 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/CL/CLDeviceBackend.h"
+#include "arm_compute/graph/backends/CL/CLDeviceBackend.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/INode.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/Tensor.h"
-#include "arm_compute/graph2/backends/BackendRegistrar.h"
-#include "arm_compute/graph2/backends/CL/CLFunctionFactory.h"
-#include "arm_compute/graph2/backends/CL/CLNodeValidator.h"
-#include "arm_compute/graph2/backends/CL/CLSubTensorHandle.h"
-#include "arm_compute/graph2/backends/CL/CLTensorHandle.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/backends/BackendRegistrar.h"
+#include "arm_compute/graph/backends/CL/CLFunctionFactory.h"
+#include "arm_compute/graph/backends/CL/CLNodeValidator.h"
+#include "arm_compute/graph/backends/CL/CLSubTensorHandle.h"
+#include "arm_compute/graph/backends/CL/CLTensorHandle.h"
 
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/runtime/BlobLifetimeManager.h"
@@ -45,7 +45,7 @@
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -114,6 +114,11 @@
     }
 }
 
+bool CLDeviceBackend::is_backend_supported()
+{
+    return arm_compute::opencl_is_available();
+}
+
 std::unique_ptr<ITensorHandle> CLDeviceBackend::create_tensor(const Tensor &tensor)
 {
     // Get tensor descriptor
@@ -171,5 +176,5 @@
     return mm;
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/CL/CLFunctionsFactory.cpp b/src/graph/backends/CL/CLFunctionsFactory.cpp
similarity index 97%
rename from src/graph2/backends/CL/CLFunctionsFactory.cpp
rename to src/graph/backends/CL/CLFunctionsFactory.cpp
index 5a51b19..1b448fe 100644
--- a/src/graph2/backends/CL/CLFunctionsFactory.cpp
+++ b/src/graph/backends/CL/CLFunctionsFactory.cpp
@@ -21,16 +21,16 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/CL/CLFunctionFactory.h"
+#include "arm_compute/graph/backends/CL/CLFunctionFactory.h"
 
 #include "arm_compute/core/utils/misc/Cast.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/TypePrinter.h"
-#include "arm_compute/graph2/Types.h"
-#include "arm_compute/graph2/backends/Utils.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/TypePrinter.h"
+#include "arm_compute/graph/Types.h"
+#include "arm_compute/graph/backends/Utils.h"
+#include "arm_compute/graph/nodes/Nodes.h"
 #include "arm_compute/runtime/CL/CLFunctions.h"
 
 #include "support/ToolchainSupport.h"
@@ -39,7 +39,7 @@
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -51,12 +51,12 @@
  *
  * @return Backing tensor if present else nullptr
  */
-arm_compute::ICLTensor *get_backing_tensor(arm_compute::graph2::Tensor *tensor)
+arm_compute::ICLTensor *get_backing_tensor(arm_compute::graph::Tensor *tensor)
 {
     arm_compute::ICLTensor *backing_tensor = nullptr;
     if(tensor != nullptr)
     {
-        ARM_COMPUTE_ERROR_ON(tensor->desc().target != arm_compute::graph2::Target::CL);
+        ARM_COMPUTE_ERROR_ON(tensor->desc().target != arm_compute::graph::Target::CL);
         // Get backing tensor handle
         ITensorHandle *tensor_handle = tensor->handle();
         // Get backing tensor
@@ -586,5 +586,5 @@
     }
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/CL/CLNodeValidator.cpp b/src/graph/backends/CL/CLNodeValidator.cpp
similarity index 91%
rename from src/graph2/backends/CL/CLNodeValidator.cpp
rename to src/graph/backends/CL/CLNodeValidator.cpp
index 8512856..c16b2e6 100644
--- a/src/graph2/backends/CL/CLNodeValidator.cpp
+++ b/src/graph/backends/CL/CLNodeValidator.cpp
@@ -21,10 +21,10 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/CL/CLNodeValidator.h"
+#include "arm_compute/graph/backends/CL/CLNodeValidator.h"
 
-#include "arm_compute/graph2/backends/ValidateHelpers.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/backends/ValidateHelpers.h"
+#include "arm_compute/graph/nodes/Nodes.h"
 
 #include "arm_compute/core/utils/misc/Cast.h"
 #include "arm_compute/runtime/CL/CLFunctions.h"
@@ -33,7 +33,7 @@
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -60,5 +60,5 @@
     }
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/CL/CLSubTensorHandle.cpp b/src/graph/backends/CL/CLSubTensorHandle.cpp
similarity index 95%
rename from src/graph2/backends/CL/CLSubTensorHandle.cpp
rename to src/graph/backends/CL/CLSubTensorHandle.cpp
index 65a1ba4..a1bc8a1 100644
--- a/src/graph2/backends/CL/CLSubTensorHandle.cpp
+++ b/src/graph/backends/CL/CLSubTensorHandle.cpp
@@ -21,13 +21,13 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/CL/CLSubTensorHandle.h"
+#include "arm_compute/graph/backends/CL/CLSubTensorHandle.h"
 
 #include "arm_compute/core/utils/misc/Cast.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -74,5 +74,5 @@
     return true;
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/CL/CLTensorHandle.cpp b/src/graph/backends/CL/CLTensorHandle.cpp
similarity index 95%
rename from src/graph2/backends/CL/CLTensorHandle.cpp
rename to src/graph/backends/CL/CLTensorHandle.cpp
index 89678fb..563c4d9 100644
--- a/src/graph2/backends/CL/CLTensorHandle.cpp
+++ b/src/graph/backends/CL/CLTensorHandle.cpp
@@ -21,11 +21,11 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/CL/CLTensorHandle.h"
+#include "arm_compute/graph/backends/CL/CLTensorHandle.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -74,5 +74,5 @@
     return false;
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/GLES/GCDeviceBackend.cpp b/src/graph/backends/GLES/GCDeviceBackend.cpp
similarity index 86%
rename from src/graph2/backends/GLES/GCDeviceBackend.cpp
rename to src/graph/backends/GLES/GCDeviceBackend.cpp
index 7dab422..8cd9994 100644
--- a/src/graph2/backends/GLES/GCDeviceBackend.cpp
+++ b/src/graph/backends/GLES/GCDeviceBackend.cpp
@@ -21,17 +21,17 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/GLES/GCDeviceBackend.h"
+#include "arm_compute/graph/backends/GLES/GCDeviceBackend.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/INode.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/Tensor.h"
-#include "arm_compute/graph2/backends/BackendRegistrar.h"
-#include "arm_compute/graph2/backends/GLES/GCFunctionFactory.h"
-#include "arm_compute/graph2/backends/GLES/GCNodeValidator.h"
-#include "arm_compute/graph2/backends/GLES/GCTensorHandle.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/backends/BackendRegistrar.h"
+#include "arm_compute/graph/backends/GLES/GCFunctionFactory.h"
+#include "arm_compute/graph/backends/GLES/GCNodeValidator.h"
+#include "arm_compute/graph/backends/GLES/GCTensorHandle.h"
 
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/runtime/BlobLifetimeManager.h"
@@ -44,7 +44,7 @@
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -75,6 +75,11 @@
     }
 }
 
+bool GCDeviceBackend::is_backend_supported()
+{
+    return arm_compute::opengles31_is_available();
+}
+
 std::unique_ptr<ITensorHandle> GCDeviceBackend::create_tensor(const Tensor &tensor)
 {
     // Get tensor descriptor
@@ -129,5 +134,5 @@
     return mm;
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/GLES/GCFunctionsFactory.cpp b/src/graph/backends/GLES/GCFunctionsFactory.cpp
similarity index 97%
rename from src/graph2/backends/GLES/GCFunctionsFactory.cpp
rename to src/graph/backends/GLES/GCFunctionsFactory.cpp
index 24ab2bc..12e7c04 100644
--- a/src/graph2/backends/GLES/GCFunctionsFactory.cpp
+++ b/src/graph/backends/GLES/GCFunctionsFactory.cpp
@@ -21,16 +21,16 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/GLES/GCFunctionFactory.h"
+#include "arm_compute/graph/backends/GLES/GCFunctionFactory.h"
 
 #include "arm_compute/core/utils/misc/Cast.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/TypePrinter.h"
-#include "arm_compute/graph2/Types.h"
-#include "arm_compute/graph2/backends/Utils.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/TypePrinter.h"
+#include "arm_compute/graph/Types.h"
+#include "arm_compute/graph/backends/Utils.h"
+#include "arm_compute/graph/nodes/Nodes.h"
 #include "arm_compute/runtime/GLES_COMPUTE/GCFunctions.h"
 
 #include "support/ToolchainSupport.h"
@@ -39,7 +39,7 @@
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -51,12 +51,12 @@
  *
  * @return Backing tensor if present else nullptr
  */
-arm_compute::IGCTensor *get_backing_tensor(arm_compute::graph2::Tensor *tensor)
+arm_compute::IGCTensor *get_backing_tensor(arm_compute::graph::Tensor *tensor)
 {
     arm_compute::IGCTensor *backing_tensor = nullptr;
     if(tensor != nullptr)
     {
-        ARM_COMPUTE_ERROR_ON(tensor->desc().target != arm_compute::graph2::Target::GC);
+        ARM_COMPUTE_ERROR_ON(tensor->desc().target != arm_compute::graph::Target::GC);
         // Get backing tensor handle
         ITensorHandle *tensor_handle = tensor->handle();
         // Get backing tensor
@@ -503,5 +503,5 @@
     }
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/GLES/GCNodeValidator.cpp b/src/graph/backends/GLES/GCNodeValidator.cpp
similarity index 95%
rename from src/graph2/backends/GLES/GCNodeValidator.cpp
rename to src/graph/backends/GLES/GCNodeValidator.cpp
index b8daae5..1e89265 100644
--- a/src/graph2/backends/GLES/GCNodeValidator.cpp
+++ b/src/graph/backends/GLES/GCNodeValidator.cpp
@@ -21,10 +21,10 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/GLES/GCNodeValidator.h"
+#include "arm_compute/graph/backends/GLES/GCNodeValidator.h"
 
-#include "arm_compute/graph2/backends/ValidateHelpers.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/backends/ValidateHelpers.h"
+#include "arm_compute/graph/nodes/Nodes.h"
 
 #include "arm_compute/core/utils/misc/Cast.h"
 #include "arm_compute/runtime/GLES_COMPUTE/GCFunctions.h"
@@ -33,7 +33,7 @@
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -118,5 +118,5 @@
     }
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/GLES/GCTensorHandle.cpp b/src/graph/backends/GLES/GCTensorHandle.cpp
similarity index 95%
rename from src/graph2/backends/GLES/GCTensorHandle.cpp
rename to src/graph/backends/GLES/GCTensorHandle.cpp
index 2165cd2..ae7c778 100644
--- a/src/graph2/backends/GLES/GCTensorHandle.cpp
+++ b/src/graph/backends/GLES/GCTensorHandle.cpp
@@ -21,11 +21,11 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/GLES/GCTensorHandle.h"
+#include "arm_compute/graph/backends/GLES/GCTensorHandle.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -74,5 +74,5 @@
     return false;
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/NEON/NEDeviceBackend.cpp b/src/graph/backends/NEON/NEDeviceBackend.cpp
similarity index 86%
rename from src/graph2/backends/NEON/NEDeviceBackend.cpp
rename to src/graph/backends/NEON/NEDeviceBackend.cpp
index 6cb507b..87f88df 100644
--- a/src/graph2/backends/NEON/NEDeviceBackend.cpp
+++ b/src/graph/backends/NEON/NEDeviceBackend.cpp
@@ -21,18 +21,18 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/NEON/NEDeviceBackend.h"
+#include "arm_compute/graph/backends/NEON/NEDeviceBackend.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/INode.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/Tensor.h"
-#include "arm_compute/graph2/backends/BackendRegistrar.h"
-#include "arm_compute/graph2/backends/NEON/NEFunctionFactory.h"
-#include "arm_compute/graph2/backends/NEON/NENodeValidator.h"
-#include "arm_compute/graph2/backends/NEON/NESubTensorHandle.h"
-#include "arm_compute/graph2/backends/NEON/NETensorHandle.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/backends/BackendRegistrar.h"
+#include "arm_compute/graph/backends/NEON/NEFunctionFactory.h"
+#include "arm_compute/graph/backends/NEON/NENodeValidator.h"
+#include "arm_compute/graph/backends/NEON/NESubTensorHandle.h"
+#include "arm_compute/graph/backends/NEON/NETensorHandle.h"
 
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/runtime/Allocator.h"
@@ -46,7 +46,7 @@
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -78,6 +78,11 @@
     }
 }
 
+bool NEDeviceBackend::is_backend_supported()
+{
+    return true;
+}
+
 std::unique_ptr<ITensorHandle> NEDeviceBackend::create_tensor(const Tensor &tensor)
 {
     // Get tensor descriptor
@@ -137,5 +142,5 @@
     return mm;
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
similarity index 98%
rename from src/graph2/backends/NEON/NEFunctionFactory.cpp
rename to src/graph/backends/NEON/NEFunctionFactory.cpp
index 9332103..228af9c 100644
--- a/src/graph2/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -21,15 +21,15 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/NEON/NEFunctionFactory.h"
+#include "arm_compute/graph/backends/NEON/NEFunctionFactory.h"
 
 #include "arm_compute/core/utils/misc/Cast.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/TypePrinter.h"
-#include "arm_compute/graph2/backends/Utils.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/TypePrinter.h"
+#include "arm_compute/graph/backends/Utils.h"
+#include "arm_compute/graph/nodes/Nodes.h"
 #include "arm_compute/runtime/NEON/NEFunctions.h"
 #include "support/ToolchainSupport.h"
 
@@ -37,7 +37,7 @@
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -49,7 +49,7 @@
  *
  * @return Backing tensor if present else nullptr
  */
-arm_compute::ITensor *get_backing_tensor(arm_compute::graph2::Tensor *tensor)
+arm_compute::ITensor *get_backing_tensor(arm_compute::graph::Tensor *tensor)
 {
     return ((tensor == nullptr) || (tensor->handle() == nullptr)) ? nullptr : &tensor->handle()->tensor();
 }
@@ -559,5 +559,5 @@
     }
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
similarity index 91%
rename from src/graph2/backends/NEON/NENodeValidator.cpp
rename to src/graph/backends/NEON/NENodeValidator.cpp
index 4620f4c..074f035 100644
--- a/src/graph2/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -21,10 +21,10 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/NEON/NENodeValidator.h"
+#include "arm_compute/graph/backends/NEON/NENodeValidator.h"
 
-#include "arm_compute/graph2/backends/ValidateHelpers.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/backends/ValidateHelpers.h"
+#include "arm_compute/graph/nodes/Nodes.h"
 
 #include "arm_compute/core/utils/misc/Cast.h"
 #include "arm_compute/runtime/NEON/NEFunctions.h"
@@ -33,7 +33,7 @@
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -61,5 +61,5 @@
     }
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/NEON/NESubTensorHandle.cpp b/src/graph/backends/NEON/NESubTensorHandle.cpp
similarity index 94%
rename from src/graph2/backends/NEON/NESubTensorHandle.cpp
rename to src/graph/backends/NEON/NESubTensorHandle.cpp
index 1cd15be..c48ba6b 100644
--- a/src/graph2/backends/NEON/NESubTensorHandle.cpp
+++ b/src/graph/backends/NEON/NESubTensorHandle.cpp
@@ -21,11 +21,11 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/NEON/NESubTensorHandle.h"
+#include "arm_compute/graph/backends/NEON/NESubTensorHandle.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -71,5 +71,5 @@
     return true;
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/NEON/NETensorHandle.cpp b/src/graph/backends/NEON/NETensorHandle.cpp
similarity index 94%
rename from src/graph2/backends/NEON/NETensorHandle.cpp
rename to src/graph/backends/NEON/NETensorHandle.cpp
index 0b901c3..8508ac9 100644
--- a/src/graph2/backends/NEON/NETensorHandle.cpp
+++ b/src/graph/backends/NEON/NETensorHandle.cpp
@@ -21,11 +21,11 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/backends/NEON/NETensorHandle.h"
+#include "arm_compute/graph/backends/NEON/NETensorHandle.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace backends
 {
@@ -73,5 +73,5 @@
     return false;
 }
 } // namespace backends
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/detail/ExecutionHelpers.cpp b/src/graph/detail/ExecutionHelpers.cpp
similarity index 94%
rename from src/graph2/detail/ExecutionHelpers.cpp
rename to src/graph/detail/ExecutionHelpers.cpp
index 3688d0b..5a50728 100644
--- a/src/graph2/detail/ExecutionHelpers.cpp
+++ b/src/graph/detail/ExecutionHelpers.cpp
@@ -21,17 +21,17 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/detail/ExecutionHelpers.h"
+#include "arm_compute/graph/detail/ExecutionHelpers.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/GraphContext.h"
-#include "arm_compute/graph2/GraphManager.h"
-#include "arm_compute/graph2/Tensor.h"
-#include "arm_compute/graph2/backends/BackendRegistry.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/GraphManager.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace detail
 {
@@ -195,5 +195,5 @@
     }
 }
 } // namespace detail
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/frontend/Stream.cpp b/src/graph/frontend/Stream.cpp
similarity index 91%
rename from src/graph2/frontend/Stream.cpp
rename to src/graph/frontend/Stream.cpp
index 4e794f2..96a166c 100644
--- a/src/graph2/frontend/Stream.cpp
+++ b/src/graph/frontend/Stream.cpp
@@ -21,14 +21,14 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/frontend/Stream.h"
+#include "arm_compute/graph/frontend/Stream.h"
 
-#include "arm_compute/graph2/Utils.h"
-#include "arm_compute/graph2/frontend/ILayer.h"
+#include "arm_compute/graph/Utils.h"
+#include "arm_compute/graph/frontend/ILayer.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace frontend
 {
@@ -65,5 +65,5 @@
     return _g;
 }
 } // namespace frontend
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/frontend/SubStream.cpp b/src/graph/frontend/SubStream.cpp
similarity index 89%
rename from src/graph2/frontend/SubStream.cpp
rename to src/graph/frontend/SubStream.cpp
index e6fa605..e8bd23a 100644
--- a/src/graph2/frontend/SubStream.cpp
+++ b/src/graph/frontend/SubStream.cpp
@@ -21,14 +21,14 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/frontend/SubStream.h"
+#include "arm_compute/graph/frontend/SubStream.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/frontend/ILayer.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/frontend/ILayer.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace frontend
 {
@@ -55,5 +55,5 @@
     return _s.graph();
 }
 } // namespace frontend
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
diff --git a/src/graph2/mutators/DepthConcatSubTensorMutator.cpp b/src/graph/mutators/DepthConcatSubTensorMutator.cpp
similarity index 91%
rename from src/graph2/mutators/DepthConcatSubTensorMutator.cpp
rename to src/graph/mutators/DepthConcatSubTensorMutator.cpp
index ea3743b..c56f4c5 100644
--- a/src/graph2/mutators/DepthConcatSubTensorMutator.cpp
+++ b/src/graph/mutators/DepthConcatSubTensorMutator.cpp
@@ -21,19 +21,19 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/mutators/DepthConcatSubTensorMutator.h"
+#include "arm_compute/graph/mutators/DepthConcatSubTensorMutator.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/backends/BackendRegistry.h"
-#include "arm_compute/graph2/nodes/DepthConcatenateLayerNode.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
+#include "arm_compute/graph/nodes/DepthConcatenateLayerNode.h"
 
 #include "arm_compute/core/utils/misc/Cast.h"
 #include "arm_compute/core/utils/misc/Iterable.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 const char *DepthConcatSubTensorMutator::name()
 {
@@ -82,5 +82,5 @@
         }
     }
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
diff --git a/src/graph2/mutators/InPlaceOperationMutator.cpp b/src/graph/mutators/InPlaceOperationMutator.cpp
similarity index 92%
rename from src/graph2/mutators/InPlaceOperationMutator.cpp
rename to src/graph/mutators/InPlaceOperationMutator.cpp
index bb13e98..bd3f098 100644
--- a/src/graph2/mutators/InPlaceOperationMutator.cpp
+++ b/src/graph/mutators/InPlaceOperationMutator.cpp
@@ -21,14 +21,14 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/mutators/InPlaceOperationMutator.h"
+#include "arm_compute/graph/mutators/InPlaceOperationMutator.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Logger.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 const char *InPlaceOperationMutator::name()
 {
@@ -59,5 +59,5 @@
         }
     }
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
diff --git a/src/graph2/mutators/NodeFusionMutator.cpp b/src/graph/mutators/NodeFusionMutator.cpp
similarity index 94%
rename from src/graph2/mutators/NodeFusionMutator.cpp
rename to src/graph/mutators/NodeFusionMutator.cpp
index d0ab3e7..2e893c2 100644
--- a/src/graph2/mutators/NodeFusionMutator.cpp
+++ b/src/graph/mutators/NodeFusionMutator.cpp
@@ -21,17 +21,17 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/mutators/NodeFusionMutator.h"
+#include "arm_compute/graph/mutators/NodeFusionMutator.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/nodes/Nodes.h"
 
 #include "arm_compute/core/utils/misc/Cast.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 namespace detail
 {
@@ -92,5 +92,5 @@
 {
     detail::fuse_batch_norm_with_activation(g);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
diff --git a/src/graph2/mutators/SplitLayerSubTensorMutator.cpp b/src/graph/mutators/SplitLayerSubTensorMutator.cpp
similarity index 92%
rename from src/graph2/mutators/SplitLayerSubTensorMutator.cpp
rename to src/graph/mutators/SplitLayerSubTensorMutator.cpp
index 33494ba..179a6c3 100644
--- a/src/graph2/mutators/SplitLayerSubTensorMutator.cpp
+++ b/src/graph/mutators/SplitLayerSubTensorMutator.cpp
@@ -21,19 +21,19 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/mutators/SplitLayerSubTensorMutator.h"
+#include "arm_compute/graph/mutators/SplitLayerSubTensorMutator.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Logger.h"
-#include "arm_compute/graph2/backends/BackendRegistry.h"
-#include "arm_compute/graph2/nodes/SplitLayerNode.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/backends/BackendRegistry.h"
+#include "arm_compute/graph/nodes/SplitLayerNode.h"
 
 #include "arm_compute/core/utils/misc/Cast.h"
 #include "arm_compute/core/utils/misc/Iterable.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 const char *SplitLayerSubTensorMutator::name()
 {
@@ -85,5 +85,5 @@
         }
     }
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
diff --git a/src/graph/nodes/ActivationLayer.cpp b/src/graph/nodes/ActivationLayer.cpp
deleted file mode 100644
index 546c42a..0000000
--- a/src/graph/nodes/ActivationLayer.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/ActivationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-ActivationLayer::ActivationLayer(const ActivationLayerInfo activation_info)
-    : _activation_info(activation_info)
-{
-    set_supports_in_place(true);
-}
-
-std::unique_ptr<arm_compute::IFunction> ActivationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-    _target_hint              = ctx.hints().target_hint();
-
-    // Create node context
-    NodeContext node_ctx(OperationType::ActivationLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_output(out);
-    node_ctx.add_parameter<ActivationLayerInfo>("ActivationLayerInfo", _activation_info);
-
-    // Get function
-    return OperationRegistry::get().find_operation(OperationType::ActivationLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph2/nodes/ActivationLayerNode.cpp b/src/graph/nodes/ActivationLayerNode.cpp
similarity index 92%
rename from src/graph2/nodes/ActivationLayerNode.cpp
rename to src/graph/nodes/ActivationLayerNode.cpp
index c7c36e9..9996d2c 100644
--- a/src/graph2/nodes/ActivationLayerNode.cpp
+++ b/src/graph/nodes/ActivationLayerNode.cpp
@@ -21,14 +21,14 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/ActivationLayerNode.h"
+#include "arm_compute/graph/nodes/ActivationLayerNode.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 ActivationLayerNode::ActivationLayerNode(ActivationLayerInfo info)
     : _info(info)
@@ -79,5 +79,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/nodes/BatchNormalizationLayer.cpp b/src/graph/nodes/BatchNormalizationLayer.cpp
deleted file mode 100644
index 24287ac..0000000
--- a/src/graph/nodes/BatchNormalizationLayer.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/BatchNormalizationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> BatchNormalizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-    _target_hint              = ctx.hints().target_hint();
-
-    unsigned int batch_norm_size = in->info()->dimension(2);
-    if(_mean.tensor() == nullptr)
-    {
-        _mean.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
-    }
-    if(_var.tensor() == nullptr)
-    {
-        _var.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
-    }
-    if(_beta.tensor() == nullptr)
-    {
-        _beta.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
-    }
-    if(_gamma.tensor() == nullptr)
-    {
-        _gamma.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
-    }
-
-    bool mean_is_loaded  = _mean.tensor() != nullptr;
-    bool var_is_loaded   = _var.tensor() != nullptr;
-    bool gamma_is_loaded = _gamma.tensor() != nullptr;
-    bool beta_is_loaded  = _beta.tensor() != nullptr;
-
-    // Set mean, var, gamma and beta target
-    _mean.set_target(_target_hint);
-    _var.set_target(_target_hint);
-    _gamma.set_target(_target_hint);
-    _beta.set_target(_target_hint);
-
-    // Create node context
-    NodeContext node_ctx(OperationType::BatchNormalizationLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_input(_mean.tensor());
-    node_ctx.add_input(_var.tensor());
-    node_ctx.add_input(_beta.tensor());
-    node_ctx.add_input(_gamma.tensor());
-    node_ctx.add_output(out);
-    node_ctx.add_parameter<float>("epsilon", _epsilon);
-    node_ctx.add_parameter<ActivationLayerInfo>("act_info", _act_info);
-
-    // Configure operation
-    auto func = OperationRegistry::get().find_operation(OperationType::BatchNormalizationLayer, _target_hint)->configure(node_ctx);
-
-    // Fill tensors
-    if(!mean_is_loaded)
-    {
-        _mean.allocate_and_fill_if_needed();
-    }
-    if(!var_is_loaded)
-    {
-        _var.allocate_and_fill_if_needed();
-    }
-    if(!gamma_is_loaded)
-    {
-        _gamma.allocate_and_fill_if_needed();
-    }
-    if(!beta_is_loaded)
-    {
-        _beta.allocate_and_fill_if_needed();
-    }
-
-    // Get function
-    return func;
-}
\ No newline at end of file
diff --git a/src/graph2/nodes/BatchNormalizationLayerNode.cpp b/src/graph/nodes/BatchNormalizationLayerNode.cpp
similarity index 93%
rename from src/graph2/nodes/BatchNormalizationLayerNode.cpp
rename to src/graph/nodes/BatchNormalizationLayerNode.cpp
index b9f6342..f7b041c 100644
--- a/src/graph2/nodes/BatchNormalizationLayerNode.cpp
+++ b/src/graph/nodes/BatchNormalizationLayerNode.cpp
@@ -21,15 +21,15 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/BatchNormalizationLayerNode.h"
+#include "arm_compute/graph/nodes/BatchNormalizationLayerNode.h"
 
 #include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 BatchNormalizationLayerNode::BatchNormalizationLayerNode(float epsilon, ActivationLayerInfo fused_activation)
     : _epsilon(epsilon), _fused_activation(fused_activation)
@@ -90,5 +90,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/nodes/BranchLayer.cpp b/src/graph/nodes/BranchLayer.cpp
deleted file mode 100644
index 7a20a56..0000000
--- a/src/graph/nodes/BranchLayer.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/BranchLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/SubGraph.h"
-#include "arm_compute/graph/Tensor.h"
-#include "arm_compute/runtime/IFunction.h"
-#include "support/ToolchainSupport.h"
-#include "utils/TypePrinter.h"
-
-#include <memory>
-#include <tuple>
-#include <vector>
-
-using namespace arm_compute::graph;
-
-/** Branch function */
-class BranchFunction final : public arm_compute::IFunction
-{
-public:
-    /** Default Constructor */
-    BranchFunction()
-        : _graphs()
-    {
-    }
-    /** Registers graph to be executed by the branch function
-     *
-     * @param[in] graph Graph to register
-     */
-    void register_graph(std::unique_ptr<Graph> graph)
-    {
-        _graphs.push_back(std::move(graph));
-    }
-    // Inherited methods overriden:
-    void run() override
-    {
-        for(auto &g : _graphs)
-        {
-            ARM_COMPUTE_ERROR_ON(g.get() == nullptr);
-            g->run();
-        }
-    }
-
-private:
-    std::vector<std::unique_ptr<Graph>> _graphs;
-};
-
-std::unique_ptr<arm_compute::IFunction> BranchLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON(_branch_merge_method != BranchMergeMethod::DEPTH_CONCATENATE);
-    ARM_COMPUTE_UNUSED(_branch_merge_method);
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    // Create branch function
-    auto func = arm_compute::support::cpp14::make_unique<BranchFunction>();
-
-    // Track output depth
-    int depth = 0;
-
-    // Constuct all sub-graphs given the input/output
-    for(auto &sg : _sub_graphs)
-    {
-        ARM_COMPUTE_ERROR_ON(sg.get() == nullptr);
-
-        // IO buffers
-        std::unique_ptr<ITensorObject> in;
-        std::unique_ptr<ITensorObject> out;
-        SubTensor                     *out_sub_tensor = nullptr;
-
-        // Create input sub-tensor
-        if(!sg->has_input())
-        {
-            ARM_COMPUTE_ERROR_ON(dynamic_cast<Tensor *>(input) == nullptr);
-            in = arm_compute::support::cpp14::make_unique<SubTensor>(*dynamic_cast<Tensor *>(input),
-                                                                     input->tensor()->info()->tensor_shape(),
-                                                                     Coordinates());
-        }
-
-        // Create output sub-tensor
-        if(!sg->has_output())
-        {
-            ARM_COMPUTE_ERROR_ON((dynamic_cast<Tensor *>(output) == nullptr) && (dynamic_cast<SubTensor *>(output) == nullptr));
-
-            out = arm_compute::support::cpp14::make_unique<SubTensor>(output->tensor(),
-                                                                      TensorShape(),
-                                                                      Coordinates(0, 0, depth),
-                                                                      output->target(),
-                                                                      true);
-            out_sub_tensor = dynamic_cast<SubTensor *>(out.get());
-        }
-
-        // Construct sub_graph
-        auto g = sg->construct(ctx, std::move(in), std::move(out));
-
-        // Register graph to function
-        func->register_graph(std::move(g));
-
-        // Update and track depth
-        if(out_sub_tensor != nullptr)
-        {
-            ARM_COMPUTE_ERROR_ON(out_sub_tensor->tensor() == nullptr);
-            depth += out_sub_tensor->tensor()->info()->tensor_shape()[2];
-        }
-    }
-
-    return std::move(func);
-}
\ No newline at end of file
diff --git a/src/graph2/nodes/ConstNode.cpp b/src/graph/nodes/ConstNode.cpp
similarity index 91%
rename from src/graph2/nodes/ConstNode.cpp
rename to src/graph/nodes/ConstNode.cpp
index 5bd6a81..631971c 100644
--- a/src/graph2/nodes/ConstNode.cpp
+++ b/src/graph/nodes/ConstNode.cpp
@@ -21,14 +21,14 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/ConstNode.h"
+#include "arm_compute/graph/nodes/ConstNode.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 ConstNode::ConstNode(TensorDescriptor desc)
     : _desc(desc)
@@ -68,5 +68,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
diff --git a/src/graph/nodes/ConvolutionLayer.cpp b/src/graph/nodes/ConvolutionLayer.cpp
deleted file mode 100644
index 5b3a84a..0000000
--- a/src/graph/nodes/ConvolutionLayer.cpp
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/ConvolutionLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
-#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"
-#include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
-#include "arm_compute/runtime/NEON/functions/NEWinogradLayer.h"
-#include "support/ToolchainSupport.h"
-#include "utils/GraphTypePrinter.h"
-#include "utils/TypePrinter.h"
-
-#include <tuple>
-#include <vector>
-
-using namespace arm_compute::graph;
-
-namespace
-{
-/** Calculates the output shaped of the convolution layer
- *
- * @param[in] input_shape   Input tensor shape
- * @param[in] weights_shape Weights shape
- * @param[in] conv_info     Convolution information (padding, stride, etc.)
- *
- * @return The expected output tensor shape
- */
-TensorShape calculate_convolution_layer_output_shape(const TensorShape &input_shape, const TensorShape &weights_shape, const PadStrideInfo &conv_info)
-{
-    unsigned int output_width  = 0;
-    unsigned int output_height = 0;
-
-    // Get output width and height
-    std::tie(output_width, output_height) = arm_compute::scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), conv_info);
-
-    // Create output shape
-    TensorShape output_shape = input_shape;
-    output_shape.set(0, output_width);
-    output_shape.set(1, output_height);
-    output_shape.set(2, weights_shape[3]);
-
-    return output_shape;
-}
-
-// Instantiate GEMM based convolution layer
-template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_function(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
-                                                             const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
-{
-    auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
-    conv->configure(
-        dynamic_cast<TensorType *>(input),
-        dynamic_cast<TensorType *>(weights),
-        dynamic_cast<TensorType *>(biases),
-        dynamic_cast<TensorType *>(output),
-        conv_info, weights_info);
-    return std::move(conv);
-}
-
-// Instantiate direct convolution layer
-template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate_direct_function(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
-                                                                    const PadStrideInfo &conv_info)
-{
-    auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
-    conv->configure(
-        dynamic_cast<TensorType *>(input),
-        dynamic_cast<TensorType *>(weights),
-        dynamic_cast<TensorType *>(biases),
-        dynamic_cast<TensorType *>(output),
-        conv_info);
-    return std::move(conv);
-}
-
-template <TargetHint                    target_hint>
-std::unique_ptr<arm_compute::IFunction> instantiate(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
-                                                    const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
-                                                    ConvolutionMethodHint conv_method);
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
-                                                                        const PadStrideInfo &conv_info,
-                                                                        const WeightsInfo    &weights_info,
-                                                                        ConvolutionMethodHint conv_method)
-{
-    if((conv_method == ConvolutionMethodHint::WINOGRAD)
-       && arm_compute::CLWinogradConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info)) // NOLINT
-    {
-        ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLWinogradConvolutionLayer");
-        return instantiate_direct_function<arm_compute::CLWinogradConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info);
-    }
-    else if((conv_method == ConvolutionMethodHint::DIRECT)
-            && arm_compute::CLDirectConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info)) // NOLINT
-    {
-        ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDirectConvolutionLayer");
-        return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info);
-    }
-    else
-    {
-        ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLConvolutionLayer");
-        return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
-    }
-}
-
-template <>
-std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(arm_compute::ITensor *input, arm_compute::ITensor *weights, arm_compute::ITensor *biases, arm_compute::ITensor *output,
-                                                                      const PadStrideInfo &conv_info,
-                                                                      const WeightsInfo    &weights_info,
-                                                                      ConvolutionMethodHint conv_method)
-{
-    const unsigned int kernel_size_x = weights->info()->tensor_shape().x();
-    const unsigned int kernel_size_y = weights->info()->tensor_shape().y();
-    const unsigned int conv_stride_x = conv_info.stride().first;
-    const unsigned int conv_stride_y = conv_info.stride().second;
-
-    bool is_square_kernel = (kernel_size_x == kernel_size_y);
-    bool has_same_stride  = (conv_stride_x == conv_stride_y);
-
-    // TODO (COMPID-765) : Winograd should have a validate function
-    if(conv_method == ConvolutionMethodHint::WINOGRAD && is_square_kernel && ((kernel_size_x == 3) || (kernel_size_x == 5)) && has_same_stride && (conv_info.stride().first == 1))
-    {
-        ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEWinogradConvolutionLayer");
-        return instantiate_direct_function<arm_compute::NEWinogradLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info);
-    }
-    else if((conv_method == ConvolutionMethodHint::DIRECT)
-            && arm_compute::NEDirectConvolutionLayer::validate(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info)) // NOLINT
-    {
-        ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEDirectConvolutionLayer");
-        return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info);
-    }
-    else
-    {
-        ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEConvolutionLayer");
-        return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info, weights_info);
-    }
-}
-} // namespace
-
-/** Grouped Convolution function */
-class GroupedConvolutionFunction final : public arm_compute::IFunction
-{
-public:
-    /** Default Constructor */
-    GroupedConvolutionFunction() = default;
-    /** Default Destructor */
-    ~GroupedConvolutionFunction() final = default;
-    /** Prevent instances from being copy constructed */
-    GroupedConvolutionFunction(const GroupedConvolutionFunction &) = delete;
-    /** Prevent instances from being copy assigned */
-    GroupedConvolutionFunction &operator=(const GroupedConvolutionFunction &) = delete;
-    /** Allow instances to be move constructed */
-    GroupedConvolutionFunction(GroupedConvolutionFunction &&) noexcept = default;
-    /** Allow instances to be move assigned */
-    GroupedConvolutionFunction &operator=(GroupedConvolutionFunction &&) noexcept = default;
-    /** Adds a convolution
-     *
-     * @param convolution Convolution function to add
-     */
-    void add_convolution_function(std::unique_ptr<IFunction> convolution) // NOLINT
-    {
-        _convolutions.emplace_back(std::move(convolution));
-    }
-
-    // Inherited methods overridden:
-    void run() override
-    {
-        for(auto &c : _convolutions)
-        {
-            c->run();
-        }
-    }
-
-private:
-    std::vector<std::unique_ptr<IFunction>> _convolutions{};
-};
-
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-
-    // Set weights and biases info
-    if(_weights.tensor() == nullptr)
-    {
-        TensorInfo info = TensorInfo(TensorShape(_conv_width, _conv_height, in->info()->dimension(2) / _num_groups, _ofm),
-                                     in->info()->num_channels(),
-                                     in->info()->data_type(),
-                                     in->info()->fixed_point_position());
-        info.set_quantization_info(_weights_quant_info);
-        _weights.set_info(std::move(info));
-    }
-    if(_biases.has_accessor() && _biases.tensor() == nullptr)
-    {
-        DataType dt = in->info()->data_type();
-        _biases.set_info(TensorInfo(TensorShape(_ofm), in->info()->num_channels(), is_data_type_quantized_asymmetric(dt) ? DataType::S32 : dt, in->info()->fixed_point_position()));
-    }
-
-    std::unique_ptr<arm_compute::IFunction> func;
-    _target_hint                                 = ctx.hints().target_hint();
-    const ConvolutionMethodHint conv_method_hint = ctx.hints().convolution_method_hint();
-
-    // Check if the weights and biases are loaded
-    bool weights_are_loaded = _weights.tensor() != nullptr;
-    bool biases_are_loaded  = _biases.has_accessor() ? _biases.tensor() != nullptr : true;
-
-    // Set bias and weights target
-    _weights.set_target(_target_hint);
-    if(_biases.has_accessor())
-    {
-        _biases.set_target(_target_hint);
-    }
-
-    // Calculate output shape
-    TensorShape output_shape = calculate_convolution_layer_output_shape(in->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);
-
-    // Output auto inizialitation if not yet initialized
-    arm_compute::auto_init_if_empty(*out->info(), output_shape, 1, in->info()->data_type(), in->info()->fixed_point_position(),
-                                    (_out_quant_info.empty()) ? in->info()->quantization_info() : _out_quant_info);
-
-    // Create appropriate convolution function
-    if(_num_groups == 1)
-    {
-        func = instantiate_convolution(in, out, conv_method_hint);
-    }
-    else
-    {
-        func = instantiate_grouped_convolution(in, out, conv_method_hint);
-    }
-
-    // Fill weights
-    if(!weights_are_loaded)
-    {
-        _weights.allocate_and_fill_if_needed();
-    }
-    // Fill biases
-    if(!biases_are_loaded)
-    {
-        _biases.allocate_and_fill_if_needed();
-    }
-
-    ARM_COMPUTE_LOG_GRAPH_INFO(" Data Type: " << in->info()->data_type()
-                               << " Input Shape: " << in->info()->tensor_shape()
-                               << " Weights shape: " << _weights.info().tensor_shape()
-                               << " Biases Shape: " << _biases.info().tensor_shape()
-                               << " Output Shape: " << out->info()->tensor_shape()
-                               << " PadStrideInfo: " << _conv_info
-                               << " Groups: " << _num_groups
-                               << " WeightsInfo: " << _weights_info
-                               << std::endl);
-
-    return func;
-}
-
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ITensor *input, ITensor *output, ConvolutionMethodHint conv_method_hint)
-{
-    std::unique_ptr<arm_compute::IFunction> func;
-    if(_target_hint == TargetHint::OPENCL)
-    {
-        func = instantiate<TargetHint::OPENCL>(input, _weights.tensor(), _biases.tensor(), output, _conv_info, _weights_info, conv_method_hint);
-    }
-    else
-    {
-        func = instantiate<TargetHint::NEON>(input, _weights.tensor(), _biases.tensor(), output, _conv_info, _weights_info, conv_method_hint);
-    }
-    return func;
-}
-
-std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_convolution(ITensor *input, ITensor *output, ConvolutionMethodHint conv_method_hint)
-{
-    // Get tensor shapes
-    TensorShape input_shape   = input->info()->tensor_shape();
-    TensorShape output_shape  = output->info()->tensor_shape();
-    TensorShape weights_shape = _weights.info().tensor_shape();
-    TensorShape biases_shape  = _biases.info().tensor_shape();
-
-    ARM_COMPUTE_ERROR_ON_MSG((input_shape.z() % _num_groups) != 0, "Input depth not multiple of the number of groups!");
-    ARM_COMPUTE_ERROR_ON_MSG((output_shape.z() % _num_groups) != 0, "Output depth not multiple of the number of groups!");
-    ARM_COMPUTE_ERROR_ON_MSG((weights_shape[3] % _num_groups) != 0, "Number of kernels not multiple of the number of groups!");
-    ARM_COMPUTE_ERROR_ON_MSG((biases_shape.x() % _num_groups) != 0, "Biases not multiple of the number of groups!");
-
-    // Create a grouped convolution function
-    auto grouped_conv = arm_compute::support::cpp14::make_unique<GroupedConvolutionFunction>();
-
-    // Create sub-tensors vectors
-    _is = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
-    _os = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
-    _ws = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
-    _bs = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
-
-    // Calculate sub-tensor splits
-    const int input_split   = input_shape.z() / _num_groups;
-    const int output_split  = output_shape.z() / _num_groups;
-    const int weights_split = weights_shape[3] / _num_groups;
-    const int biases_split  = biases_shape.x() / _num_groups;
-
-    // Calculate sub-tensor shapes
-    input_shape.set(2, input_split);
-    output_shape.set(2, output_split);
-    weights_shape.set(3, weights_split);
-    biases_shape.set(0, biases_split);
-
-    // Configure sub-tensors
-    for(int i = 0; i < static_cast<int>(_num_groups); ++i)
-    {
-        // Create convolution function
-        std::unique_ptr<arm_compute::IFunction> func;
-
-        // Calculate sub-tensors starting coordinates
-        Coordinates input_coord(0, 0, input_split * i);
-        Coordinates output_coord(0, 0, output_split * i);
-        Coordinates weights_coord(0, 0, 0, weights_split * i);
-        Coordinates biases_coord(biases_split * i);
-
-        // Create sub-tensors for input, output, weights and bias
-        auto hint_to_use = (_target_hint == TargetHint::OPENCL) ? TargetHint::OPENCL : TargetHint::NEON;
-        _is[i]           = SubTensor(input, input_shape, input_coord, hint_to_use);
-        _os[i]           = SubTensor(output, output_shape, output_coord, hint_to_use);
-        _ws[i]           = SubTensor(_weights.tensor(), weights_shape, weights_coord, hint_to_use);
-        _bs[i]           = SubTensor(_biases.tensor(), biases_shape, biases_coord, hint_to_use);
-
-        // Instantiate convolution function
-        if(_target_hint == TargetHint::OPENCL)
-        {
-            func = instantiate<TargetHint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
-        }
-        else
-        {
-            func = instantiate<TargetHint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
-        }
-
-        // Add convolution function to the list of convolutions for the grouped convolution
-        grouped_conv->add_convolution_function(std::move(func));
-    }
-
-    return std::move(grouped_conv);
-}
diff --git a/src/graph2/nodes/ConvolutionLayerNode.cpp b/src/graph/nodes/ConvolutionLayerNode.cpp
similarity index 94%
rename from src/graph2/nodes/ConvolutionLayerNode.cpp
rename to src/graph/nodes/ConvolutionLayerNode.cpp
index 499b352..4617284 100644
--- a/src/graph2/nodes/ConvolutionLayerNode.cpp
+++ b/src/graph/nodes/ConvolutionLayerNode.cpp
@@ -21,15 +21,15 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/ConvolutionLayerNode.h"
+#include "arm_compute/graph/nodes/ConvolutionLayerNode.h"
 
 #include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 ConvolutionLayerNode::ConvolutionLayerNode(PadStrideInfo info, ConvolutionMethod method)
     : _info(std::move(info)), _method(method)
@@ -107,5 +107,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/nodes/DeQuantizationLayer.cpp b/src/graph/nodes/DeQuantizationLayer.cpp
deleted file mode 100644
index af9ecee..0000000
--- a/src/graph/nodes/DeQuantizationLayer.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/DequantizationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> DequantizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    _target_hint              = ctx.hints().target_hint();
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-
-    if(_min_max.tensor() == nullptr)
-    {
-        TensorShape shape = in->info()->tensor_shape();
-        shape.set(Window::DimX, 2);
-        shape.remove_dimension(1);
-        shape.remove_dimension(1);
-
-        _min_max.set_info(TensorInfo(shape, in->info()->num_channels(), DataType::F32));
-        _min_max.set_target(_target_hint);
-    }
-
-    bool minmax_is_loaded = _min_max.tensor() != nullptr;
-
-    // Create node context
-    NodeContext node_ctx(OperationType::DequantizationLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_output(_min_max.tensor());
-    node_ctx.add_output(out);
-
-    // Fill min max
-    if(!minmax_is_loaded)
-    {
-        _min_max.allocate_and_fill_if_needed();
-    }
-
-    // Get function
-    return OperationRegistry::get().find_operation(OperationType::DequantizationLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph2/nodes/DepthConcatenateLayerNode.cpp b/src/graph/nodes/DepthConcatenateLayerNode.cpp
similarity index 94%
rename from src/graph2/nodes/DepthConcatenateLayerNode.cpp
rename to src/graph/nodes/DepthConcatenateLayerNode.cpp
index dcd6651..1c05397 100644
--- a/src/graph2/nodes/DepthConcatenateLayerNode.cpp
+++ b/src/graph/nodes/DepthConcatenateLayerNode.cpp
@@ -21,15 +21,15 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/DepthConcatenateLayerNode.h"
+#include "arm_compute/graph/nodes/DepthConcatenateLayerNode.h"
 
 #include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 DepthConcatenateLayerNode::DepthConcatenateLayerNode(unsigned int total_nodes)
     : _total_nodes(total_nodes), _is_enabled(true)
@@ -129,5 +129,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/nodes/DepthConvertLayer.cpp b/src/graph/nodes/DepthConvertLayer.cpp
deleted file mode 100644
index 9b328e7..0000000
--- a/src/graph/nodes/DepthConvertLayer.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/DepthConvertLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-
-using namespace arm_compute::graph;
-
-DepthConvertLayer::DepthConvertLayer(const ConvertPolicy policy, uint32_t shift, DataType output_datatype)
-    : _policy(policy), _shift(shift), _output_datatype(output_datatype)
-{
-}
-
-std::unique_ptr<arm_compute::IFunction> DepthConvertLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    _target_hint              = ctx.hints().target_hint();
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-
-    // Auto configure output
-    arm_compute::auto_init_if_empty(*out->info(), in->info()->tensor_shape(), 1, _output_datatype, in->info()->fixed_point_position());
-
-    // Create node context
-    NodeContext node_ctx(OperationType::DepthConvertLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_output(out);
-    node_ctx.add_parameter<ConvertPolicy>("ConvertPolicy", _policy);
-    node_ctx.add_parameter<uint32_t>("shift", _shift);
-
-    // Get function
-    return OperationRegistry::get().find_operation(OperationType::DepthConvertLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/DepthwiseConvolutionLayer.cpp b/src/graph/nodes/DepthwiseConvolutionLayer.cpp
deleted file mode 100644
index e5101cc..0000000
--- a/src/graph/nodes/DepthwiseConvolutionLayer.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/DepthwiseConvolutionLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> DepthwiseConvolutionLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-    _target_hint              = ctx.hints().target_hint();
-
-    if(_weights.tensor() == nullptr)
-    {
-        TensorShape weights_shape(_conv_width, _conv_height, input->tensor()->info()->tensor_shape().z());
-        TensorInfo  info = TensorInfo(TensorShape(weights_shape), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position());
-        info.set_quantization_info(_quant_info);
-        _weights.set_info(std::move(info));
-    }
-    if(_biases.has_accessor() && _biases.tensor() == nullptr)
-    {
-        DataType dt = in->info()->data_type();
-        _biases.set_info(TensorInfo(TensorShape(in->info()->dimension(2)), in->info()->num_channels(), is_data_type_quantized_asymmetric(dt) ? DataType::S32 : dt, in->info()->fixed_point_position()));
-    }
-
-    bool weights_is_loaded = _weights.tensor() != nullptr;
-    bool biases_is_loaded  = _biases.has_accessor() ? _biases.tensor() != nullptr : true;
-
-    _weights.set_target(_target_hint);
-    if(_biases.has_accessor())
-    {
-        _biases.set_target(_target_hint);
-    }
-
-    // Create node context
-    NodeContext node_ctx(OperationType::DepthwiseConvolutionLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_input(_weights.tensor());
-    if(_biases.has_accessor())
-    {
-        node_ctx.add_input(_biases.tensor());
-    }
-    node_ctx.add_output(out);
-    node_ctx.add_parameter<PadStrideInfo>("ConvolutionInfo", _conv_info);
-    node_ctx.add_parameter<bool>("Optimized3x3", _opt3x3);
-
-    // Configure operation
-    auto func = OperationRegistry::get().find_operation(OperationType::DepthwiseConvolutionLayer, _target_hint)->configure(node_ctx);
-
-    // Fill tensors
-    if(!weights_is_loaded)
-    {
-        _weights.allocate_and_fill_if_needed();
-    }
-    if(!biases_is_loaded)
-    {
-        _biases.allocate_and_fill_if_needed();
-    }
-
-    // Get function
-    return func;
-}
diff --git a/src/graph2/nodes/DepthwiseConvolutionLayerNode.cpp b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
similarity index 94%
rename from src/graph2/nodes/DepthwiseConvolutionLayerNode.cpp
rename to src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
index b030e8b..67a3902 100644
--- a/src/graph2/nodes/DepthwiseConvolutionLayerNode.cpp
+++ b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
@@ -21,15 +21,15 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/DepthwiseConvolutionLayerNode.h"
+#include "arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h"
 
 #include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 DepthwiseConvolutionLayerNode::DepthwiseConvolutionLayerNode(PadStrideInfo info, DepthwiseConvolutionMethod method)
     : _info(std::move(info)), _method(method)
@@ -106,5 +106,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/EltwiseLayerNode.cpp b/src/graph/nodes/EltwiseLayerNode.cpp
similarity index 92%
rename from src/graph2/nodes/EltwiseLayerNode.cpp
rename to src/graph/nodes/EltwiseLayerNode.cpp
index 149d926..b794043 100644
--- a/src/graph2/nodes/EltwiseLayerNode.cpp
+++ b/src/graph/nodes/EltwiseLayerNode.cpp
@@ -21,14 +21,14 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/EltwiseLayerNode.h"
+#include "arm_compute/graph/nodes/EltwiseLayerNode.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 EltwiseLayerNode::EltwiseLayerNode(EltwiseOperation op)
     : _op(op)
@@ -79,5 +79,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
diff --git a/src/graph/nodes/FlattenLayer.cpp b/src/graph/nodes/FlattenLayer.cpp
deleted file mode 100644
index ea08296..0000000
--- a/src/graph/nodes/FlattenLayer.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/FlattenLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> FlattenLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    _target_hint              = ctx.hints().target_hint();
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-
-    // Auto configure output
-    TensorShape tensor_shape = in->info()->tensor_shape();
-    tensor_shape.collapse(in->info()->num_dimensions());
-    arm_compute::auto_init_if_empty(*out->info(), tensor_shape, 1, in->info()->data_type(), in->info()->fixed_point_position());
-
-    // Create node context
-    NodeContext node_ctx(OperationType::FlattenLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_output(out);
-
-    // Get function
-    return OperationRegistry::get().find_operation(OperationType::FlattenLayer, _target_hint)->configure(node_ctx);
-}
\ No newline at end of file
diff --git a/src/graph2/nodes/FlattenLayerNode.cpp b/src/graph/nodes/FlattenLayerNode.cpp
similarity index 92%
rename from src/graph2/nodes/FlattenLayerNode.cpp
rename to src/graph/nodes/FlattenLayerNode.cpp
index 7c4059f..8b847c7 100644
--- a/src/graph2/nodes/FlattenLayerNode.cpp
+++ b/src/graph/nodes/FlattenLayerNode.cpp
@@ -21,14 +21,14 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/FlattenLayerNode.h"
+#include "arm_compute/graph/nodes/FlattenLayerNode.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 FlattenLayerNode::FlattenLayerNode()
 {
@@ -76,5 +76,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/nodes/FloorLayer.cpp b/src/graph/nodes/FloorLayer.cpp
deleted file mode 100644
index 8750546..0000000
--- a/src/graph/nodes/FloorLayer.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/FloorLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> FloorLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-    _target_hint              = ctx.hints().target_hint();
-
-    // Create node context
-    NodeContext node_ctx(OperationType::FloorLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_output(out);
-
-    // Get function
-    return OperationRegistry::get().find_operation(OperationType::FloorLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index 3742150..cbf2b35 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,18 +21,40 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/nodes/FullyConnectedLayer.h"
+#include "arm_compute/graph/nodes/FullyConnectedLayerNode.h"
 
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
-using namespace arm_compute::graph;
-
-namespace
+namespace arm_compute
 {
-TensorShape calculate_fullyconnected_layer_output_shape(const TensorShape &input_shape, unsigned int output_neurons)
+namespace graph
+{
+FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs)
+    : _num_outputs(num_outputs)
+{
+    _input_edges.resize(3, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+TensorShape FullyConnectedLayerNode::compute_weights_shape(TensorShape input_shape, unsigned int num_outputs)
+{
+    unsigned int num_weights    = 1;
+    unsigned int num_dimensions = input_shape.num_dimensions();
+    // Ignore the batch dimension if there is one:
+    if(num_dimensions == 2 || num_dimensions == 4)
+    {
+        num_dimensions--;
+    }
+    for(unsigned int i = 0; i < num_dimensions; i++)
+    {
+        num_weights *= input_shape[i];
+    }
+    return TensorShape(num_weights, num_outputs);
+}
+
+TensorShape FullyConnectedLayerNode::compute_output_shape(TensorShape input_shape, unsigned int num_outputs)
 {
     // Note: Only 1D batch space is supported at the moment
     unsigned int batches = input_shape[1];
@@ -40,67 +62,46 @@
     {
         batches = input_shape[3];
     }
-    return TensorShape(output_neurons, batches);
+    return TensorShape(num_outputs, batches);
 }
-} // namespace
 
-std::unique_ptr<arm_compute::IFunction> FullyConnectedLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+bool FullyConnectedLayerNode::forward_descriptors()
 {
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-    _target_hint              = ctx.hints().target_hint();
-
-    if(_weights.tensor() == nullptr)
+    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
     {
-        unsigned int num_weights    = 1;
-        unsigned int num_dimensions = in->info()->num_dimensions();
-        // Ignore the batch dimension if there is one:
-        if(num_dimensions == 2 || num_dimensions == 4)
-        {
-            num_dimensions--;
-        }
-        for(unsigned int i = 0; i < num_dimensions; i++)
-        {
-            num_weights *= in->info()->dimension(i);
-        }
-        _weights.set_info(TensorInfo(TensorShape(num_weights, _num_neurons), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
     }
-    if(_biases.tensor() == nullptr)
-    {
-        _biases.set_info(TensorInfo(TensorShape(_num_neurons), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
-    }
-
-    // Auto configure output
-    arm_compute::auto_init_if_empty(*out->info(),
-                                    calculate_fullyconnected_layer_output_shape(in->info()->tensor_shape(), _num_neurons),
-                                    in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position());
-
-    bool weights_are_loaded = _weights.tensor() != nullptr;
-    bool biases_are_loaded  = _biases.tensor() != nullptr;
-
-    // Create node context
-    NodeContext node_ctx(OperationType::FullyConnectedLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_input(_weights.set_target(_target_hint));
-    node_ctx.add_input(_biases.set_target(_target_hint));
-    node_ctx.add_output(out);
-
-    // Configure operation
-    auto func = OperationRegistry::get().find_operation(OperationType::FullyConnectedLayer, _target_hint)->configure(node_ctx);
-
-    // Fill biases
-    if(!weights_are_loaded)
-    {
-        _weights.allocate_and_fill_if_needed();
-    }
-    if(!biases_are_loaded)
-    {
-        _biases.allocate_and_fill_if_needed();
-    }
-
-    // Get function
-    return func;
+    return false;
 }
+
+TensorDescriptor FullyConnectedLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    const Tensor *src = input(0);
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+    TensorDescriptor output_info  = src->desc();
+    TensorShape      output_shape = compute_output_shape(src->desc().shape, _num_outputs);
+    output_info.shape             = output_shape;
+    return output_info;
+}
+
+Status FullyConnectedLayerNode::validate()
+{
+    return Status{};
+}
+
+NodeType FullyConnectedLayerNode::type() const
+{
+    return NodeType::FullyConnectedLayer;
+}
+
+void FullyConnectedLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/InputNode.cpp b/src/graph/nodes/InputNode.cpp
similarity index 91%
rename from src/graph2/nodes/InputNode.cpp
rename to src/graph/nodes/InputNode.cpp
index 84cce2a..e912633 100644
--- a/src/graph2/nodes/InputNode.cpp
+++ b/src/graph/nodes/InputNode.cpp
@@ -21,14 +21,14 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/InputNode.h"
+#include "arm_compute/graph/nodes/InputNode.h"
 
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 InputNode::InputNode(TensorDescriptor desc)
     : _desc(desc)
@@ -68,5 +68,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
diff --git a/src/graph/nodes/L2NormalizeLayer.cpp b/src/graph/nodes/L2NormalizeLayer.cpp
deleted file mode 100644
index 9813ba4..0000000
--- a/src/graph/nodes/L2NormalizeLayer.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/L2NormalizeLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-L2NormalizeLayer::L2NormalizeLayer(unsigned int axis, float epsilon)
-    : _axis(axis), _epsilon(epsilon)
-{
-}
-
-std::unique_ptr<arm_compute::IFunction> L2NormalizeLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-    _target_hint              = ctx.hints().target_hint();
-
-    // Create node context
-    NodeContext node_ctx(OperationType::L2NormalizeLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_output(out);
-    node_ctx.add_parameter<unsigned int>("axis", _axis);
-    node_ctx.add_parameter<float>("epsilon", _epsilon);
-
-    // Get function
-    return OperationRegistry::get().find_operation(OperationType::L2NormalizeLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/NormalizationLayer.cpp b/src/graph/nodes/NormalizationLayer.cpp
deleted file mode 100644
index a489329..0000000
--- a/src/graph/nodes/NormalizationLayer.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/NormalizationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-NormalizationLayer::NormalizationLayer(const NormalizationLayerInfo norm_info)
-    : _norm_info(norm_info)
-{
-}
-
-std::unique_ptr<arm_compute::IFunction> NormalizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-    _target_hint              = ctx.hints().target_hint();
-
-    // Create node context
-    NodeContext node_ctx(OperationType::NormalizationLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_output(out);
-    node_ctx.add_parameter<NormalizationLayerInfo>("NormalizationLayerInfo", _norm_info);
-
-    // Get function
-    return OperationRegistry::get().find_operation(OperationType::NormalizationLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph2/nodes/NormalizationLayerNode.cpp b/src/graph/nodes/NormalizationLayerNode.cpp
similarity index 92%
rename from src/graph2/nodes/NormalizationLayerNode.cpp
rename to src/graph/nodes/NormalizationLayerNode.cpp
index a394879..a9f2fbd 100644
--- a/src/graph2/nodes/NormalizationLayerNode.cpp
+++ b/src/graph/nodes/NormalizationLayerNode.cpp
@@ -21,15 +21,15 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/NormalizationLayerNode.h"
+#include "arm_compute/graph/nodes/NormalizationLayerNode.h"
 
 #include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 NormalizationLayerNode::NormalizationLayerNode(NormalizationLayerInfo norm_info)
     : _info(norm_info)
@@ -80,5 +80,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/OutputNode.cpp b/src/graph/nodes/OutputNode.cpp
similarity index 88%
rename from src/graph2/nodes/OutputNode.cpp
rename to src/graph/nodes/OutputNode.cpp
index 1daebb1..4c63bfa 100644
--- a/src/graph2/nodes/OutputNode.cpp
+++ b/src/graph/nodes/OutputNode.cpp
@@ -21,16 +21,16 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/OutputNode.h"
+#include "arm_compute/graph/nodes/OutputNode.h"
 
 #include "arm_compute/core/Error.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
-#include "arm_compute/graph2/Tensor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Tensor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 OutputNode::OutputNode()
 {
@@ -62,5 +62,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
diff --git a/src/graph/nodes/PoolingLayer.cpp b/src/graph/nodes/PoolingLayer.cpp
deleted file mode 100644
index 2c15119..0000000
--- a/src/graph/nodes/PoolingLayer.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/PoolingLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-PoolingLayer::PoolingLayer(const PoolingLayerInfo pool_info)
-    : _pool_info(pool_info)
-{
-}
-
-std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-    _target_hint              = ctx.hints().target_hint();
-
-    // Create node context
-    NodeContext node_ctx(OperationType::PoolingLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_output(out);
-    node_ctx.add_parameter<PoolingLayerInfo>("PoolingLayerInfo", _pool_info);
-
-    // Get function
-    return OperationRegistry::get().find_operation(OperationType::PoolingLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph2/nodes/PoolingLayerNode.cpp b/src/graph/nodes/PoolingLayerNode.cpp
similarity index 94%
rename from src/graph2/nodes/PoolingLayerNode.cpp
rename to src/graph/nodes/PoolingLayerNode.cpp
index 2c2cf53..a7b6b36 100644
--- a/src/graph2/nodes/PoolingLayerNode.cpp
+++ b/src/graph/nodes/PoolingLayerNode.cpp
@@ -21,15 +21,15 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/PoolingLayerNode.h"
+#include "arm_compute/graph/nodes/PoolingLayerNode.h"
 
 #include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 PoolingLayerNode::PoolingLayerNode(PoolingLayerInfo pool_info)
     : _info(std::move(pool_info))
@@ -99,5 +99,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/nodes/QuantizationLayer.cpp b/src/graph/nodes/QuantizationLayer.cpp
deleted file mode 100644
index c102f47..0000000
--- a/src/graph/nodes/QuantizationLayer.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/QuantizationLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> QuantizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    _target_hint              = ctx.hints().target_hint();
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-
-    // Create node context
-    NodeContext node_ctx(OperationType::QuantizationLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_output(out);
-
-    // Get function
-    return OperationRegistry::get().find_operation(OperationType::QuantizationLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph/nodes/ReshapeLayer.cpp b/src/graph/nodes/ReshapeLayer.cpp
index b0c117e..2757f06 100644
--- a/src/graph/nodes/ReshapeLayer.cpp
+++ b/src/graph/nodes/ReshapeLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,37 +21,61 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/nodes/ReshapeLayer.h"
+#include "arm_compute/graph/nodes/ReshapeLayerNode.h"
 
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
-using namespace arm_compute::graph;
-
-ReshapeLayer::ReshapeLayer(TensorShape shape)
+namespace arm_compute
+{
+namespace graph
+{
+ReshapeLayerNode::ReshapeLayerNode(TensorShape shape)
     : _shape(shape)
 {
+    _input_edges.resize(1, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
 }
 
-std::unique_ptr<arm_compute::IFunction> ReshapeLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+bool ReshapeLayerNode::forward_descriptors()
 {
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    _target_hint              = ctx.hints().target_hint();
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-
-    // Auto configure output
-    arm_compute::auto_init_if_empty(*out->info(), _shape, 1, in->info()->data_type(), in->info()->fixed_point_position(), in->info()->quantization_info());
-
-    // Create node context
-    NodeContext node_ctx(OperationType::ReshapeLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_output(out);
-
-    // Get function
-    return OperationRegistry::get().find_operation(OperationType::ReshapeLayer, _target_hint)->configure(node_ctx);
+    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
 }
+
+TensorDescriptor ReshapeLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+    const Tensor *src = input(0);
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+    TensorDescriptor output_desc = src->desc();
+    output_desc.shape            = _shape;
+
+    return output_desc;
+}
+
+Status ReshapeLayerNode::validate()
+{
+    return Status{};
+}
+
+NodeType ReshapeLayerNode::type() const
+{
+    return NodeType::ReshapeLayer;
+}
+
+void ReshapeLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/nodes/ResidualLayer.cpp b/src/graph/nodes/ResidualLayer.cpp
deleted file mode 100644
index 87404f9..0000000
--- a/src/graph/nodes/ResidualLayer.cpp
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/ResidualLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "arm_compute/graph/SubGraph.h"
-#include "arm_compute/graph/Tensor.h"
-#include "arm_compute/runtime/IFunction.h"
-#include "support/ToolchainSupport.h"
-#include "utils/Utils.h"
-
-#include <memory>
-#include <tuple>
-#include <vector>
-
-using namespace arm_compute::graph;
-
-/** Residual function */
-class ResidualFunction final : public arm_compute::IFunction
-{
-public:
-    /** Default Constructor */
-    ResidualFunction(GraphContext &ctx, ITensorObject *output)
-        : _ctx(ctx), _input(nullptr), _output(output), _func(nullptr), _graphs(), _graph_outputs()
-    {
-    }
-
-    /** Prevent instances from being copy constructed */
-    ResidualFunction(const ResidualFunction &) = delete;
-    /** Prevent instances from being copy assigned */
-    const ResidualFunction &operator=(const ResidualFunction &) = delete;
-    /** Prevent instances from being move constructed */
-    ResidualFunction(ResidualFunction &&) = delete;
-    /** Prevent instances from being move assigned */
-    ResidualFunction &operator=(ResidualFunction &&) = delete;
-    /** Default destructor */
-    ~ResidualFunction() override = default;
-
-    /** Set the input (when using only one sub graph)
-     *
-     * @param[in] input Input to set
-     */
-    void set_input(std::unique_ptr<ITensorObject> input)
-    {
-        _input = std::move(input);
-    }
-
-    /** Registers graph to be executed by the residual function
-     *
-     * @param[in] graph  Graph to register
-     * @param[in] output Output to register
-     */
-    void register_graph(std::unique_ptr<Graph> graph, std::unique_ptr<ITensorObject> output)
-    {
-        _graphs.push_back(std::move(graph));
-        _graph_outputs.push_back(std::move(output));
-    }
-
-    /** Configure the function */
-    void configure()
-    {
-        ARM_COMPUTE_ERROR_ON(_graphs.size() < 1 || _graphs.size() > 2);
-        TargetHint target_hint = _ctx.hints().target_hint();
-
-        // Create node context
-        NodeContext node_ctx(OperationType::ArithmeticAddition);
-        node_ctx.set_target(target_hint);
-
-        if(_graphs.size() == 1)
-        {
-            arm_compute::ITensor *in = _input->tensor();
-            node_ctx.add_input(in);
-        }
-
-        for(auto &o : _graph_outputs)
-        {
-            arm_compute::ITensor *in = o->tensor();
-            node_ctx.add_input(in);
-        }
-
-        arm_compute::ITensor *out = _output->tensor();
-        auto_init_if_empty(*out->info(), *_graph_outputs[0]->tensor()->info());
-        node_ctx.add_output(out);
-
-        _func = OperationRegistry::get().find_operation(OperationType::ArithmeticAddition, target_hint)->configure(node_ctx);
-
-        for(auto &o : _graph_outputs)
-        {
-            o->allocate();
-        }
-    }
-
-    // Inherited methods overriden:
-    void run() override
-    {
-        ARM_COMPUTE_ERROR_ON(_graphs.size() < 1 || _graphs.size() > 2);
-
-        for(auto &g : _graphs)
-        {
-            ARM_COMPUTE_ERROR_ON(g.get() == nullptr);
-            g->run();
-        }
-
-        _func->run();
-    }
-
-private:
-    GraphContext                                _ctx;
-    std::unique_ptr<ITensorObject>              _input;
-    ITensorObject                              *_output;
-    std::unique_ptr<arm_compute::IFunction>     _func;
-    std::vector<std::unique_ptr<Graph>>         _graphs;
-    std::vector<std::unique_ptr<ITensorObject>> _graph_outputs;
-};
-
-std::unique_ptr<arm_compute::IFunction> ResidualLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<Tensor *>(input) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<Tensor *>(output) == nullptr);
-
-    // Create residual function
-    auto func = arm_compute::support::cpp14::make_unique<ResidualFunction>(ctx, output);
-
-    if(_sub_graphs.size() == 1)
-    {
-        std::unique_ptr<ITensorObject> original_in;
-        original_in = arm_compute::support::cpp14::make_unique<SubTensor>(*dynamic_cast<Tensor *>(input),
-                                                                          input->tensor()->info()->tensor_shape(),
-                                                                          Coordinates());
-        func->set_input(std::move(original_in));
-    }
-
-    // Constuct all sub-graphs given the input/output
-    for(auto &sg : _sub_graphs)
-    {
-        ARM_COMPUTE_ERROR_ON(sg.get() == nullptr);
-
-        // IO buffers
-        std::unique_ptr<ITensorObject> in;
-        std::unique_ptr<ITensorObject> out;
-        std::unique_ptr<ITensorObject> func_in;
-
-        // Create input sub-tensor
-        if(!sg->has_input())
-        {
-            in = arm_compute::support::cpp14::make_unique<SubTensor>(*dynamic_cast<Tensor *>(input),
-                                                                     input->tensor()->info()->tensor_shape(),
-                                                                     Coordinates());
-        }
-
-        // Create output sub-tensor
-        if(!sg->has_output())
-        {
-            ITensorInfo *info = input->tensor()->info();
-            func_in           = arm_compute::support::cpp14::make_unique<Tensor>(TensorInfo(info->num_channels(), info->data_type(), info->fixed_point_position()));
-            func_in->set_target(ctx.hints().target_hint());
-            out = arm_compute::support::cpp14::make_unique<SubTensor>(func_in->tensor(),
-                                                                      TensorShape(),
-                                                                      Coordinates(0, 0, 0),
-                                                                      func_in->target(),
-                                                                      true);
-        }
-
-        // Construct sub_graph
-        auto g = sg->construct(ctx, std::move(in), std::move(out));
-
-        // Register graph to function
-        func->register_graph(std::move(g), std::move(func_in));
-    }
-
-    func->configure();
-
-    return std::move(func);
-}
diff --git a/src/graph/nodes/SoftmaxLayer.cpp b/src/graph/nodes/SoftmaxLayer.cpp
deleted file mode 100644
index 7f2325b..0000000
--- a/src/graph/nodes/SoftmaxLayer.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph/nodes/SoftmaxLayer.h"
-
-#include "arm_compute/graph/Error.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistry.h"
-#include "support/ToolchainSupport.h"
-
-using namespace arm_compute::graph;
-
-std::unique_ptr<arm_compute::IFunction> SoftmaxLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
-{
-    ARM_COMPUTE_ERROR_ON_UNALLOCATED_TENSOR_OBJECT(input, output);
-
-    arm_compute::ITensor *in  = input->tensor();
-    arm_compute::ITensor *out = output->tensor();
-    _target_hint              = ctx.hints().target_hint();
-
-    // Create node context
-    NodeContext node_ctx(OperationType::SoftmaxLayer);
-    node_ctx.set_target(_target_hint);
-    node_ctx.add_input(in);
-    node_ctx.add_output(out);
-
-    // Get function
-    return OperationRegistry::get().find_operation(OperationType::SoftmaxLayer, _target_hint)->configure(node_ctx);
-}
diff --git a/src/graph2/nodes/SoftmaxLayerNode.cpp b/src/graph/nodes/SoftmaxLayerNode.cpp
similarity index 92%
rename from src/graph2/nodes/SoftmaxLayerNode.cpp
rename to src/graph/nodes/SoftmaxLayerNode.cpp
index 83bc978..4c21ac6 100644
--- a/src/graph2/nodes/SoftmaxLayerNode.cpp
+++ b/src/graph/nodes/SoftmaxLayerNode.cpp
@@ -21,15 +21,15 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/SoftmaxLayerNode.h"
+#include "arm_compute/graph/nodes/SoftmaxLayerNode.h"
 
 #include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 SoftmaxLayerNode::SoftmaxLayerNode(float beta)
     : _beta(beta)
@@ -80,5 +80,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/SplitLayerNode.cpp b/src/graph/nodes/SplitLayerNode.cpp
similarity index 94%
rename from src/graph2/nodes/SplitLayerNode.cpp
rename to src/graph/nodes/SplitLayerNode.cpp
index c34a7ff..c8fb43c 100644
--- a/src/graph2/nodes/SplitLayerNode.cpp
+++ b/src/graph/nodes/SplitLayerNode.cpp
@@ -21,15 +21,15 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/nodes/SplitLayerNode.h"
+#include "arm_compute/graph/nodes/SplitLayerNode.h"
 
 #include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 SplitLayerNode::SplitLayerNode(unsigned int num_splits, unsigned int axis)
     : _num_splits(num_splits), _axis(axis)
@@ -113,5 +113,5 @@
 {
     v.visit(*this);
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph/operations/CLSimpleOperations.cpp b/src/graph/operations/CLSimpleOperations.cpp
deleted file mode 100644
index fe56122..0000000
--- a/src/graph/operations/CLSimpleOperations.cpp
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/graph/IOperation.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistrar.h"
-#include "arm_compute/graph/Types.h"
-#include "arm_compute/runtime/CL/CLFunctions.h"
-#include "support/ToolchainSupport.h"
-#include "utils/GraphTypePrinter.h"
-#include "utils/TypePrinter.h"
-
-#include <memory>
-
-using namespace arm_compute::graph;
-
-/* Activation Layer */
-REGISTER_SIMPLE_OPERATION(CLActivationLayerOperation, OPENCL, OperationType::ActivationLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in       = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto      *out      = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-    const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo");
-
-    // Create and configure function
-    auto activation = arm_compute::support::cpp14::make_unique<arm_compute::CLActivationLayer>();
-    activation->configure(in, out, act_info);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLActivationLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " Activation function: " << act_info.activation()
-                               << " a: " << act_info.a()
-                               << " b: " << act_info.b()
-                               << std::endl);
-
-    return std::move(activation);
-}
-
-/* Arithmetic addition */
-REGISTER_SIMPLE_OPERATION(CLArithmeticAdditionOperation, OPENCL, OperationType::ArithmeticAddition)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 2);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in1 = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto *in2 = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
-    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
-    auto addition = arm_compute::support::cpp14::make_unique<arm_compute::CLArithmeticAddition>();
-    addition->configure(in1, in2, out, ConvertPolicy::SATURATE);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLArithmeticAddition"
-                               << " Data Type: " << in1->info()->data_type()
-                               << " Input 1 shape: " << in1->info()->tensor_shape()
-                               << " Input 2 shape: " << in2->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(addition);
-}
-
-/* Batch Normalization Layer */
-REGISTER_SIMPLE_OPERATION(CLBatchNormalizationLayerOperation, OPENCL, OperationType::BatchNormalizationLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 5);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(3)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(4)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in       = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto      *mean     = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
-    auto      *var      = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2));
-    auto      *beta     = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(3));
-    auto      *gamma    = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(4));
-    auto      *out      = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-    const auto epsilon  = ctx.parameter<float>("epsilon");
-    const auto act_info = ctx.parameter<ActivationLayerInfo>("act_info");
-
-    // Create and configure function
-    auto batch_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLBatchNormalizationLayer>();
-    batch_norm->configure(in, out, mean, var, beta, gamma, epsilon, act_info);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLBatchNormalizationLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " Mean shape: " << mean->info()->tensor_shape()
-                               << " Var shape: " << var->info()->tensor_shape()
-                               << " Beta shape: " << beta->info()->tensor_shape()
-                               << " Gamma shape: " << gamma->info()->tensor_shape()
-                               << " Epsilon: " << epsilon
-                               << " Activation function: " << act_info.activation()
-                               << " a: " << act_info.a()
-                               << " b: " << act_info.b()
-                               << std::endl);
-
-    return std::move(batch_norm);
-}
-
-/* DepthConvertLayer Layer */
-REGISTER_SIMPLE_OPERATION(CLDepthConvertLayerOperation, OPENCL, OperationType::DepthConvertLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in          = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto      *out         = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-    const auto conv_policy = ctx.parameter<ConvertPolicy>("ConvertPolicy");
-    const auto shift       = ctx.parameter<uint32_t>("shift");
-
-    // Create and configure function
-    auto depthconvert = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthConvertLayer>();
-    depthconvert->configure(in, out, conv_policy, shift);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDepthConvertLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " shift: " << shift
-                               << std::endl);
-
-    return std::move(depthconvert);
-}
-
-/* DepthwiseConvolutionLayer Layer */
-REGISTER_SIMPLE_OPERATION(CLDepthwiseConvolutionOperation, OPENCL, OperationType::DepthwiseConvolutionLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 2 && ctx.num_inputs() != 3);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in        = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto      *weights   = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
-    auto      *biases    = ctx.num_inputs() == 3 ? dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)) : nullptr;
-    auto      *out       = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-    const auto conv_info = ctx.parameter<PadStrideInfo>("ConvolutionInfo");
-    const auto opt3x3    = ctx.parameter<bool>("Optimized3x3");
-
-    // Create and configure function
-    std::unique_ptr<arm_compute::IFunction> func;
-    bool                                    run_3x3_opt = opt3x3 && weights->info()->dimension(0) == 3;
-    if(run_3x3_opt)
-    {
-        auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthwiseConvolutionLayer3x3>();
-        depwthwise_conv->configure(in, weights, biases, out, conv_info);
-        func = std::move(depwthwise_conv);
-    }
-    else
-    {
-        auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthwiseConvolutionLayer>();
-        depwthwise_conv->configure(in, weights, biases, out, conv_info);
-        func = std::move(depwthwise_conv);
-    }
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDepthwiseConvolutionLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Weights shape: " << weights->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape());
-    if(biases == nullptr)
-    {
-        ARM_COMPUTE_LOG_GRAPH_INFO(" Biases shape: No biases provided" << std::endl);
-    }
-    else
-    {
-        ARM_COMPUTE_LOG_GRAPH_INFO(" Biases shape: " << biases->info()->tensor_shape() << std::endl);
-    }
-
-    return func;
-}
-
-/* DeQuantizationLayer Layer */
-REGISTER_SIMPLE_OPERATION(CLDequantizationLayerOperation, OPENCL, OperationType::DequantizationLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 2);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(1)) == nullptr);
-
-    // Extract IO and info
-    auto *in      = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto *out     = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-    auto *min_max = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(1));
-
-    // Create and configure function
-    auto dequantization = arm_compute::support::cpp14::make_unique<arm_compute::CLDequantizationLayer>();
-    dequantization->configure(in, out, min_max);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDequantizationLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " Min max shape: " << min_max->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(dequantization);
-}
-
-/* Flatten Layer */
-REGISTER_SIMPLE_OPERATION(CLFlattenLayerOperation, OPENCL, OperationType::FlattenLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
-    // Create and configure function
-    auto flatten = arm_compute::support::cpp14::make_unique<arm_compute::CLFlattenLayer>();
-    flatten->configure(in, out);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFlattenLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(flatten);
-}
-
-/* Floor Layer */
-REGISTER_SIMPLE_OPERATION(CLFloorLayerOperation, OPENCL, OperationType::FloorLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
-    // Create and configure function
-    auto floor = arm_compute::support::cpp14::make_unique<arm_compute::CLFloor>();
-    floor->configure(in, out);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFloorLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(floor);
-}
-
-/* Fully Connected Layer */
-REGISTER_SIMPLE_OPERATION(CLFullyConnectedLayer, OPENCL, OperationType::FullyConnectedLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 3);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in      = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto *weights = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
-    auto *biases  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2));
-    auto *out     = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
-    // Create and configure function
-    auto fc = arm_compute::support::cpp14::make_unique<arm_compute::CLFullyConnectedLayer>();
-    fc->configure(in, weights, biases, out);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFullyConnectedLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Weights shape: " << weights->info()->tensor_shape()
-                               << " Biases Shape: " << biases->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(fc);
-}
-
-/* L2 Normalize Layer */
-REGISTER_SIMPLE_OPERATION(CLL2NormalizeLayerOperation, OPENCL, OperationType::L2NormalizeLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in      = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto      *out     = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-    const auto axis    = ctx.parameter<unsigned int>("axis");
-    const auto epsilon = ctx.parameter<float>("epsilon");
-
-    // Create and configure function
-    auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLL2NormalizeLayer>();
-    l2_norm->configure(in, out, axis, epsilon);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLL2NormalizeLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " Axis: " << axis
-                               << " Epsilon: " << epsilon
-                               << std::endl);
-
-    return std::move(l2_norm);
-}
-
-/* Normalization Layer */
-REGISTER_SIMPLE_OPERATION(CLNormalizationLayerOperation, OPENCL, OperationType::NormalizationLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in        = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto      *out       = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-    const auto norm_info = ctx.parameter<NormalizationLayerInfo>("NormalizationLayerInfo");
-
-    // Create and configure function
-    auto norm = arm_compute::support::cpp14::make_unique<arm_compute::CLNormalizationLayer>();
-    norm->configure(in, out, norm_info);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLNormalizationLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " Normalization info: " << norm_info
-                               << std::endl);
-
-    return std::move(norm);
-}
-
-/* Pooling Layer */
-REGISTER_SIMPLE_OPERATION(CLPoolingLayerOperation, OPENCL, OperationType::PoolingLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in        = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto      *out       = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-    const auto pool_info = ctx.parameter<PoolingLayerInfo>("PoolingLayerInfo");
-
-    // Create and configure function
-    auto pool = arm_compute::support::cpp14::make_unique<arm_compute::CLPoolingLayer>();
-    pool->configure(in, out, pool_info);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLPoolingLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " Pooling info: " << pool_info
-                               << std::endl);
-
-    return std::move(pool);
-}
-
-/* Quantization Layer */
-REGISTER_SIMPLE_OPERATION(CLQuantizationLayerOperation, OPENCL, OperationType::QuantizationLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
-    // Create and configure function
-    auto quantization = arm_compute::support::cpp14::make_unique<arm_compute::CLQuantizationLayer>();
-    quantization->configure(in, out);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLQuantizationLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(quantization);
-}
-
-/* Reshape Layer */
-REGISTER_SIMPLE_OPERATION(CLReshapeLayerOperation, OPENCL, OperationType::ReshapeLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
-    // Create and configure function
-    auto reshape = arm_compute::support::cpp14::make_unique<arm_compute::CLReshapeLayer>();
-    reshape->configure(in, out);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLReshapeLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(reshape);
-}
-
-/* Softmax Layer */
-REGISTER_SIMPLE_OPERATION(CLSoftmaxLayerOperation, OPENCL, OperationType::SoftmaxLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
-    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
-
-    // Create and configure function
-    auto smx = arm_compute::support::cpp14::make_unique<arm_compute::CLSoftmaxLayer>();
-    smx->configure(in, out);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLSoftmaxLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(smx);
-}
diff --git a/src/graph/operations/NESimpleOperations.cpp b/src/graph/operations/NESimpleOperations.cpp
deleted file mode 100644
index 4154b9a..0000000
--- a/src/graph/operations/NESimpleOperations.cpp
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/graph/IOperation.h"
-#include "arm_compute/graph/NodeContext.h"
-#include "arm_compute/graph/OperationRegistrar.h"
-#include "arm_compute/graph/Types.h"
-#include "arm_compute/runtime/NEON/NEFunctions.h"
-#include "support/ToolchainSupport.h"
-#include "utils/GraphTypePrinter.h"
-#include "utils/TypePrinter.h"
-
-#include <memory>
-
-using namespace arm_compute::graph;
-
-/* Activation Layer */
-REGISTER_SIMPLE_OPERATION(NEActivationLayerOperation, NEON, OperationType::ActivationLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in       = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto      *out      = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-    const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo");
-
-    // Create and configure function
-    auto activation = arm_compute::support::cpp14::make_unique<arm_compute::NEActivationLayer>();
-    activation->configure(in, out, act_info);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEActivationLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " Activation function: " << act_info.activation()
-                               << " a: " << act_info.a()
-                               << " b: " << act_info.b()
-                               << std::endl);
-
-    return std::move(activation);
-}
-
-/* Arithmetic addition */
-REGISTER_SIMPLE_OPERATION(NEArithmeticAdditionOperation, NEON, OperationType::ArithmeticAddition)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 2);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(1)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in1 = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto *in2 = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
-    auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
-    auto addition = arm_compute::support::cpp14::make_unique<arm_compute::NEArithmeticAddition>();
-    addition->configure(in1, in2, out, ConvertPolicy::SATURATE);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEArithmeticAddition"
-                               << " Data Type: " << in1->info()->data_type()
-                               << " Input 1 shape: " << in1->info()->tensor_shape()
-                               << " Input 2 shape: " << in2->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(addition);
-}
-
-/* Batch Normalization Layer */
-REGISTER_SIMPLE_OPERATION(NEBatchNormalizationLayerOperation, NEON, OperationType::BatchNormalizationLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 5);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(1)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(2)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(3)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(4)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in       = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto      *mean     = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
-    auto      *var      = dynamic_cast<arm_compute::ITensor *>(ctx.input(2));
-    auto      *beta     = dynamic_cast<arm_compute::ITensor *>(ctx.input(3));
-    auto      *gamma    = dynamic_cast<arm_compute::ITensor *>(ctx.input(4));
-    auto      *out      = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-    const auto epsilon  = ctx.parameter<float>("epsilon");
-    const auto act_info = ctx.parameter<ActivationLayerInfo>("act_info");
-
-    // Create and configure function
-    auto batch_norm = arm_compute::support::cpp14::make_unique<arm_compute::NEBatchNormalizationLayer>();
-    batch_norm->configure(in, out, mean, var, beta, gamma, epsilon, act_info);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEBatchNormalizationLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " Mean shape: " << mean->info()->tensor_shape()
-                               << " Var shape: " << var->info()->tensor_shape()
-                               << " Beta shape: " << beta->info()->tensor_shape()
-                               << " Gamma shape: " << gamma->info()->tensor_shape()
-                               << " Epsilon: " << epsilon
-                               << " Activation function: " << act_info.activation()
-                               << " a: " << act_info.a()
-                               << " b: " << act_info.b()
-                               << std::endl);
-
-    return std::move(batch_norm);
-}
-
-/* DepthConvertLayer Layer */
-REGISTER_SIMPLE_OPERATION(NEDepthConvertLayerOperation, NEON, OperationType::DepthConvertLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in          = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto      *out         = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-    const auto conv_policy = ctx.parameter<ConvertPolicy>("ConvertPolicy");
-    const auto shift       = ctx.parameter<uint32_t>("shift");
-
-    // Create and configure function
-    auto depthconvert = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthConvertLayer>();
-    depthconvert->configure(in, out, conv_policy, shift);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEDepthConvertLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " shift: " << shift
-                               << std::endl);
-
-    return std::move(depthconvert);
-}
-
-/* DepthwiseConvolutionLayer Layer */
-REGISTER_SIMPLE_OPERATION(NEDepthwiseConvolutionOperation, NEON, OperationType::DepthwiseConvolutionLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 2 && ctx.num_inputs() != 3);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in        = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto      *weights   = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
-    auto      *biases    = ctx.num_inputs() == 3 ? dynamic_cast<arm_compute::ITensor *>(ctx.input(2)) : nullptr;
-    auto      *out       = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-    const auto conv_info = ctx.parameter<PadStrideInfo>("ConvolutionInfo");
-    const auto opt3x3    = ctx.parameter<bool>("Optimized3x3");
-
-    // Create and configure function
-    std::unique_ptr<arm_compute::IFunction> func;
-    bool                                    run_3x3_opt = opt3x3 && weights->info()->dimension(0) == 3;
-    if(run_3x3_opt)
-    {
-        auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthwiseConvolutionLayer3x3>();
-        depwthwise_conv->configure(in, weights, biases, out, conv_info);
-        func = std::move(depwthwise_conv);
-    }
-    else
-    {
-        auto depwthwise_conv = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthwiseConvolutionLayer>();
-        depwthwise_conv->configure(in, weights, biases, out, conv_info);
-        func = std::move(depwthwise_conv);
-    }
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEDepthwiseConvolutionLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Weights shape: " << weights->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape());
-    if(biases == nullptr)
-    {
-        ARM_COMPUTE_LOG_GRAPH_INFO(" Biases shape: No biases provided" << std::endl);
-    }
-    else
-    {
-        ARM_COMPUTE_LOG_GRAPH_INFO(" Biases shape: " << biases->info()->tensor_shape() << std::endl);
-    }
-
-    return func;
-}
-
-/* DeQuantizationLayer Layer */
-REGISTER_SIMPLE_OPERATION(NEDequantizationLayerOperation, NEON, OperationType::DequantizationLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 2);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(1)) == nullptr);
-
-    // Extract IO and info
-    auto *in      = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto *out     = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-    auto *min_max = dynamic_cast<arm_compute::ITensor *>(ctx.output(1));
-
-    // Create and configure function
-    auto dequantization = arm_compute::support::cpp14::make_unique<arm_compute::NEDequantizationLayer>();
-    dequantization->configure(in, out, min_max);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEDequantizationLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " Min max shape: " << min_max->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(dequantization);
-}
-
-/* Flatten Layer */
-REGISTER_SIMPLE_OPERATION(NEFlattenLayerOperation, NEON, OperationType::FlattenLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in  = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
-    // Create and configure function
-    auto flatten = arm_compute::support::cpp14::make_unique<arm_compute::NEFlattenLayer>();
-    flatten->configure(in, out);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFlattenLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(flatten);
-}
-
-/* Floor Layer */
-REGISTER_SIMPLE_OPERATION(NEFloorLayerOperation, NEON, OperationType::FloorLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in  = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
-    // Create and configure function
-    auto floor = arm_compute::support::cpp14::make_unique<arm_compute::NEFloor>();
-    floor->configure(in, out);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFloorLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(floor);
-}
-
-/* Fully Connected Layer */
-REGISTER_SIMPLE_OPERATION(NEFullyConnectedLayer, NEON, OperationType::FullyConnectedLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 3);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(1)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(2)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in      = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto *weights = dynamic_cast<arm_compute::ITensor *>(ctx.input(1));
-    auto *biases  = dynamic_cast<arm_compute::ITensor *>(ctx.input(2));
-    auto *out     = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
-    // Create and configure function
-    auto fc = arm_compute::support::cpp14::make_unique<arm_compute::NEFullyConnectedLayer>();
-    fc->configure(in, weights, biases, out);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFullyConnectedLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Weights shape: " << weights->info()->tensor_shape()
-                               << " Biases Shape: " << biases->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(fc);
-}
-
-/* L2 Normalize Layer */
-REGISTER_SIMPLE_OPERATION(NEL2NormalizeLayerOperation, NEON, OperationType::L2NormalizeLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in      = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto      *out     = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-    const auto axis    = ctx.parameter<unsigned int>("axis");
-    const auto epsilon = ctx.parameter<float>("epsilon");
-
-    // Create and configure function
-    auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::NEL2NormalizeLayer>();
-    l2_norm->configure(in, out, axis, epsilon);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEL2NormalizeLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " Axis: " << axis
-                               << " Epsilon: " << epsilon
-                               << std::endl);
-
-    return std::move(l2_norm);
-}
-
-/* Normalization Layer */
-REGISTER_SIMPLE_OPERATION(NENormalizationLayerOperation, NEON, OperationType::NormalizationLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in        = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto      *out       = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-    const auto norm_info = ctx.parameter<NormalizationLayerInfo>("NormalizationLayerInfo");
-
-    // Create and configure function
-    auto norm = arm_compute::support::cpp14::make_unique<arm_compute::NENormalizationLayer>();
-    norm->configure(in, out, norm_info);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NENormalizationLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " Normalization info: " << norm_info
-                               << std::endl);
-
-    return std::move(norm);
-}
-
-/* Pooling Layer */
-REGISTER_SIMPLE_OPERATION(NEPoolingLayerOperation, NEON, OperationType::PoolingLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto      *in        = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto      *out       = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-    const auto pool_info = ctx.parameter<PoolingLayerInfo>("PoolingLayerInfo");
-
-    // Create and configure function
-    auto pool = arm_compute::support::cpp14::make_unique<arm_compute::NEPoolingLayer>();
-    pool->configure(in, out, pool_info);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEPoolingLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << " Pooling info: " << pool_info
-                               << std::endl);
-
-    return std::move(pool);
-}
-
-/* Quantization Layer */
-REGISTER_SIMPLE_OPERATION(NEQuantizationLayerOperation, NEON, OperationType::QuantizationLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in  = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
-    // Create and configure function
-    auto quantization = arm_compute::support::cpp14::make_unique<arm_compute::NEQuantizationLayer>();
-    quantization->configure(in, out);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEQuantizationLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(quantization);
-}
-
-/* Reshape Layer */
-REGISTER_SIMPLE_OPERATION(NEReshapeLayerOperation, NEON, OperationType::ReshapeLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in  = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
-    // Create and configure function
-    auto reshape = arm_compute::support::cpp14::make_unique<arm_compute::NEReshapeLayer>();
-    reshape->configure(in, out);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEReshapeLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(reshape);
-}
-
-/* Softmax Layer */
-REGISTER_SIMPLE_OPERATION(NESoftmaxLayerOperation, NEON, OperationType::SoftmaxLayer)
-{
-    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
-    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
-    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
-
-    // Extract IO and info
-    auto *in  = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
-    auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
-
-    // Create and configure function
-    auto smx = arm_compute::support::cpp14::make_unique<arm_compute::NESoftmaxLayer>();
-    smx->configure(in, out);
-
-    // Log info
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NESoftmaxLayer"
-                               << " Data Type: " << in->info()->data_type()
-                               << " Input shape: " << in->info()->tensor_shape()
-                               << " Output shape: " << out->info()->tensor_shape()
-                               << std::endl);
-
-    return std::move(smx);
-}
diff --git a/src/graph2/printers/DotGraphPrinter.cpp b/src/graph/printers/DotGraphPrinter.cpp
similarity index 94%
rename from src/graph2/printers/DotGraphPrinter.cpp
rename to src/graph/printers/DotGraphPrinter.cpp
index 04987ee..47b1bb5 100644
--- a/src/graph2/printers/DotGraphPrinter.cpp
+++ b/src/graph/printers/DotGraphPrinter.cpp
@@ -21,17 +21,17 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph2/printers/DotGraphPrinter.h"
+#include "arm_compute/graph/printers/DotGraphPrinter.h"
 
 #include "arm_compute/core/Error.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Tensor.h"
-#include "arm_compute/graph2/TypePrinter.h"
-#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/TypePrinter.h"
+#include "arm_compute/graph/nodes/Nodes.h"
 
 namespace arm_compute
 {
-namespace graph2
+namespace graph
 {
 void DotGraphVisitor::visit(ActivationLayerNode &n)
 {
@@ -169,5 +169,5 @@
         }
     }
 }
-} // namespace graph2
+} // namespace graph
 } // namespace arm_compute
diff --git a/src/graph2/Graph.cpp b/src/graph2/Graph.cpp
deleted file mode 100644
index ead67bc..0000000
--- a/src/graph2/Graph.cpp
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph2/Graph.h"
-
-namespace arm_compute
-{
-namespace graph2
-{
-Graph::Graph(GraphID id, std::string name)
-    : _id(id), _name(std::move(name)), _nodes(), _edges(), _tensors(), _tagged_nodes(), _mtx()
-{
-}
-
-bool Graph::remove_node(NodeID nid)
-{
-    if(nid >= _nodes.size())
-    {
-        return false;
-    }
-
-    std::unique_ptr<INode> &node = _nodes[nid];
-
-    // Remove node connections
-    if(node)
-    {
-        for(auto &input_eid : node->_input_edges)
-        {
-            remove_connection(input_eid);
-        }
-        for(auto &outpud_eid : node->_output_edges)
-        {
-            remove_connection(outpud_eid);
-        }
-    }
-
-    node = nullptr;
-
-    return true;
-}
-
-EdgeID Graph::add_connection(NodeID source, size_t source_idx, NodeID sink, size_t sink_idx)
-{
-    std::lock_guard<arm_compute::Mutex> lock(_mtx);
-
-    // Check if node index is valid, if node exists and finally if the connection index is valid
-    ARM_COMPUTE_ERROR_ON((source >= _nodes.size()) || (_nodes[source] == nullptr) || (source_idx >= _nodes[source]->num_outputs()));
-    ARM_COMPUTE_ERROR_ON((sink >= _nodes.size()) || (_nodes[sink] == nullptr) || (sink_idx >= _nodes[sink]->num_inputs()));
-
-    // Get nodes
-    std::unique_ptr<INode> &source_node = _nodes[source];
-    std::unique_ptr<INode> &sink_node   = _nodes[sink];
-
-    // Check for duplicate connections (Check only sink node)
-    Edge *sink_node_edge = sink_node->input_edge(sink_idx);
-    if((sink_node_edge != nullptr) && (sink_node_edge->producer_id() == source) && (sink_node_edge->producer_idx() == source_idx)
-       && (sink_node_edge->consumer_id() == sink) && (sink_node_edge->consumer_idx() == sink_idx))
-    {
-        return sink_node_edge->id();
-    }
-
-    // Check if there is already a tensor associated with output if not create one
-    TensorID tid = source_node->output_id(source_idx);
-    if(tid == NullTensorID)
-    {
-        tid = create_tensor();
-    }
-    std::unique_ptr<Tensor> &tensor = _tensors[tid];
-
-    // Create connections
-    EdgeID eid        = _edges.size();
-    auto   connection = arm_compute::support::cpp14::make_unique<Edge>(eid, source_node.get(), source_idx, sink_node.get(), sink_idx, tensor.get());
-    _edges.push_back(std::move(connection));
-
-    // Add connections to source and sink nodes
-    source_node->_output_edges.insert(eid);
-    sink_node->_input_edges[sink_idx] = eid;
-
-    // Set tensor output node
-    source_node->_outputs[source_idx] = tid;
-
-    // Bind tensor to the edge
-    tensor->bind_edge(eid);
-
-    // Try and propagate shapes in sink node
-    sink_node->forward_descriptors();
-
-    return eid;
-}
-
-bool Graph::remove_connection(EdgeID eid)
-{
-    if(eid >= _edges.size())
-    {
-        return false;
-    }
-
-    std::unique_ptr<Edge> &edge = _edges[eid];
-
-    // Remove node connections
-    if(edge != nullptr)
-    {
-        // Get tensor bound to the edge
-        if(edge->tensor() != nullptr)
-        {
-            edge->tensor()->unbind_edge(eid);
-        }
-
-        // Remove edges from source node
-        if(edge->producer() != nullptr)
-        {
-            edge->producer()->_output_edges.erase(eid);
-        }
-
-        // Remove edges from sink node
-        if((edge->consumer() != nullptr) && (edge->consumer_idx() < edge->consumer()->_input_edges.size()))
-        {
-            edge->consumer()->_input_edges[edge->consumer_idx()] = EmptyEdgeID;
-        }
-    }
-
-    // Clear edge
-    edge = nullptr;
-
-    return true;
-}
-
-TensorID Graph::create_tensor(TensorDescriptor desc)
-{
-    TensorID tid    = _tensors.size();
-    auto     tensor = support::cpp14::make_unique<Tensor>(tid, desc);
-    _tensors.push_back(std::move(tensor));
-
-    return tid;
-}
-
-std::string Graph::name() const
-{
-    return _name;
-}
-
-GraphID Graph::id() const
-{
-    return _id;
-}
-
-const std::vector<NodeID> &Graph::inputs()
-{
-    return _tagged_nodes[NodeType::Input];
-}
-
-std::vector<std::unique_ptr<INode>> &Graph::nodes()
-{
-    return _nodes;
-}
-
-const std::vector<std::unique_ptr<INode>> &Graph::nodes() const
-{
-    return _nodes;
-}
-
-const std::vector<std::unique_ptr<Edge>> &Graph::edges() const
-{
-    return _edges;
-}
-
-std::vector<std::unique_ptr<Tensor>> &Graph::tensors()
-{
-    return _tensors;
-}
-
-const std::vector<std::unique_ptr<Tensor>> &Graph::tensors() const
-{
-    return _tensors;
-}
-
-const INode *Graph::node(NodeID id) const
-{
-    return (id >= _nodes.size()) ? nullptr : _nodes[id].get();
-}
-
-INode *Graph::node(NodeID id)
-{
-    return (id >= _nodes.size()) ? nullptr : _nodes[id].get();
-}
-
-const Edge *Graph::edge(EdgeID id) const
-{
-    return (id >= _edges.size()) ? nullptr : _edges[id].get();
-}
-
-Edge *Graph::edge(EdgeID id)
-{
-    return (id >= _edges.size()) ? nullptr : _edges[id].get();
-}
-
-const Tensor *Graph::tensor(TensorID id) const
-{
-    return (id >= _tensors.size()) ? nullptr : _tensors[id].get();
-}
-
-Tensor *Graph::tensor(TensorID id)
-{
-    return (id >= _tensors.size()) ? nullptr : _tensors[id].get();
-}
-} // namespace graph2
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/GraphContext.cpp b/src/graph2/GraphContext.cpp
deleted file mode 100644
index 08a7b68..0000000
--- a/src/graph2/GraphContext.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph2/GraphContext.h"
-#include <arm_compute/graph2.h>
-
-namespace arm_compute
-{
-namespace graph2
-{
-GraphContext::GraphContext()
-    : _config(), _memory_managers()
-{
-}
-
-const GraphConfig &GraphContext::config() const
-{
-    return _config;
-}
-
-void GraphContext::set_config(const GraphConfig &config)
-{
-    _config = config;
-}
-
-bool GraphContext::insert_memory_management_ctx(MemoryManagerContext &&memory_ctx)
-{
-    Target target = memory_ctx.target;
-    if(target == Target::UNSPECIFIED || _memory_managers.find(target) != std::end(_memory_managers))
-    {
-        return false;
-    }
-
-    _memory_managers[target] = std::move(memory_ctx);
-    return true;
-}
-
-MemoryManagerContext *GraphContext::memory_management_ctx(Target target)
-{
-    return (_memory_managers.find(target) != std::end(_memory_managers)) ? &_memory_managers[target] : nullptr;
-}
-
-void GraphContext::finalize()
-{
-    for(auto &mm_obj : _memory_managers)
-    {
-        if(mm_obj.second.mm != nullptr)
-        {
-            mm_obj.second.mm->finalize();
-        }
-    }
-}
-} // namespace graph2
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/INode.cpp b/src/graph2/INode.cpp
deleted file mode 100644
index 28be341..0000000
--- a/src/graph2/INode.cpp
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph2/INode.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/graph2/Edge.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/Tensor.h"
-
-namespace arm_compute
-{
-namespace graph2
-{
-// *INDENT-OFF*
-// clang-format off
-INode::INode()
-    : _graph(nullptr), _id(EmptyNodeID), _common_params({ "", Target::UNSPECIFIED}),
-      _outputs(), _input_edges(), _output_edges(), _assigned_target(Target::UNSPECIFIED)
-{
-}
-// clang-format on
-// *INDENT-ON*
-
-void INode::set_graph(Graph *g)
-{
-    ARM_COMPUTE_ERROR_ON(g == nullptr);
-    _graph = g;
-}
-
-void INode::set_id(NodeID id)
-{
-    _id = id;
-}
-
-void INode::set_common_node_parameters(NodeParams common_params)
-{
-    _common_params = std::move(common_params);
-}
-
-void INode::set_requested_target(Target target)
-{
-    _common_params.target = target;
-}
-
-void INode::set_assigned_target(Target target)
-{
-    _assigned_target = target;
-}
-
-void INode::set_output_tensor(TensorID tid, size_t idx)
-{
-    if(tid != NullTensorID && (idx < _outputs.size()) && (_graph->tensor(tid) != nullptr))
-    {
-        ARM_COMPUTE_ERROR_ON(_graph == nullptr);
-        Tensor *updated_tensor = _graph->tensor(tid);
-        _outputs[idx]          = tid;
-
-        // Set tensor to all output edges of the node
-        for(auto &output_edge_id : _output_edges)
-        {
-            auto output_edge = _graph->edge(output_edge_id);
-            if(output_edge != nullptr)
-            {
-                // Unbind edge from current tensor
-                auto current_output_tensor = output_edge->tensor();
-                current_output_tensor->unbind_edge(output_edge->id());
-
-                // Update tensor to edge and rebind tensor
-                output_edge->update_bound_tensor(updated_tensor);
-                updated_tensor->bind_edge(output_edge->id());
-            }
-        }
-    }
-}
-
-NodeID INode::id() const
-{
-    return _id;
-}
-
-std::string INode::name() const
-{
-    return _common_params.name;
-}
-
-const Graph *INode::graph() const
-{
-    return _graph;
-}
-
-Graph *INode::graph()
-{
-    return _graph;
-}
-
-const std::vector<TensorID> &INode::outputs() const
-{
-    return _outputs;
-}
-
-const std::vector<EdgeID> &INode::input_edges() const
-{
-    return _input_edges;
-}
-
-const std::set<EdgeID> &INode::output_edges() const
-{
-    return _output_edges;
-}
-
-TensorID INode::input_id(size_t idx) const
-{
-    ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
-    Edge *e = _graph->edge(_input_edges[idx]);
-    return (e != nullptr) ? e->tensor_id() : NullTensorID;
-}
-
-TensorID INode::output_id(size_t idx) const
-{
-    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
-    return _outputs[idx];
-}
-
-Tensor *INode::input(size_t idx) const
-{
-    ARM_COMPUTE_ERROR_ON(_graph == nullptr);
-    ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
-    Edge *e = _graph->edge(_input_edges[idx]);
-    return (e != nullptr) ? e->tensor() : nullptr;
-}
-
-Tensor *INode::output(size_t idx) const
-{
-    ARM_COMPUTE_ERROR_ON(_graph == nullptr);
-    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
-    return _graph->tensor(_outputs[idx]);
-}
-
-EdgeID INode::input_edge_id(size_t idx) const
-{
-    ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
-    return _input_edges[idx];
-}
-
-Edge *INode::input_edge(size_t idx) const
-{
-    ARM_COMPUTE_ERROR_ON(_graph == nullptr);
-    ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
-    return _graph->edge(_input_edges[idx]);
-}
-
-size_t INode::num_inputs() const
-{
-    return _input_edges.size();
-}
-
-size_t INode::num_outputs() const
-{
-    return _outputs.size();
-}
-
-Target INode::requested_target() const
-{
-    return _common_params.target;
-}
-
-Target INode::assigned_target() const
-{
-    return _assigned_target;
-}
-} // namespace graph2
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/Tensor.cpp b/src/graph2/Tensor.cpp
deleted file mode 100644
index c6054d7..0000000
--- a/src/graph2/Tensor.cpp
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph2/Tensor.h"
-
-namespace arm_compute
-{
-namespace graph2
-{
-Tensor::Tensor(TensorID id, TensorDescriptor desc)
-    : _id(id), _desc(desc), _handle(nullptr), _accessor(nullptr), _bound_edges()
-{
-}
-
-TensorID Tensor::id() const
-{
-    return _id;
-}
-
-TensorDescriptor &Tensor::desc()
-{
-    return _desc;
-}
-
-const TensorDescriptor &Tensor::desc() const
-{
-    return _desc;
-}
-
-void Tensor::set_handle(std::unique_ptr<ITensorHandle> backend_tensor)
-{
-    _handle = std::move(backend_tensor);
-}
-
-ITensorHandle *Tensor::handle()
-{
-    return _handle.get();
-}
-
-void Tensor::set_accessor(std::unique_ptr<ITensorAccessor> accessor)
-{
-    _accessor = std::move(accessor);
-}
-
-ITensorAccessor *Tensor::accessor()
-{
-    return _accessor.get();
-}
-
-bool Tensor::call_accessor()
-{
-    // Early exit guard
-    if(!_accessor || !_handle)
-    {
-        return false;
-    }
-
-    // Map tensor
-    _handle->map(true);
-
-    // Return in case of null backend buffer
-    if(_handle->tensor().buffer() == nullptr)
-    {
-        return false;
-    }
-
-    // Call accessor
-    _accessor->access_tensor(_handle->tensor());
-
-    // Unmap tensor
-    _handle->unmap();
-
-    return true;
-}
-
-void Tensor::bind_edge(EdgeID eid)
-{
-    _bound_edges.insert(eid);
-}
-
-void Tensor::unbind_edge(EdgeID eid)
-{
-    _bound_edges.erase(eid);
-}
-
-const std::set<EdgeID> Tensor::bound_edges() const
-{
-    return _bound_edges;
-}
-} // namespace graph2
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/FullyConnectedLayer.cpp b/src/graph2/nodes/FullyConnectedLayer.cpp
deleted file mode 100644
index 195adc4..0000000
--- a/src/graph2/nodes/FullyConnectedLayer.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph2/nodes/FullyConnectedLayerNode.h"
-
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
-
-namespace arm_compute
-{
-namespace graph2
-{
-FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs)
-    : _num_outputs(num_outputs)
-{
-    _input_edges.resize(3, EmptyEdgeID);
-    _outputs.resize(1, NullTensorID);
-}
-
-TensorShape FullyConnectedLayerNode::compute_weights_shape(TensorShape input_shape, unsigned int num_outputs)
-{
-    unsigned int num_weights    = 1;
-    unsigned int num_dimensions = input_shape.num_dimensions();
-    // Ignore the batch dimension if there is one:
-    if(num_dimensions == 2 || num_dimensions == 4)
-    {
-        num_dimensions--;
-    }
-    for(unsigned int i = 0; i < num_dimensions; i++)
-    {
-        num_weights *= input_shape[i];
-    }
-    return TensorShape(num_weights, num_outputs);
-}
-
-TensorShape FullyConnectedLayerNode::compute_output_shape(TensorShape input_shape, unsigned int num_outputs)
-{
-    // Note: Only 1D batch space is supported at the moment
-    unsigned int batches = input_shape[1];
-    if(input_shape.num_dimensions() > 2)
-    {
-        batches = input_shape[3];
-    }
-    return TensorShape(num_outputs, batches);
-}
-
-bool FullyConnectedLayerNode::forward_descriptors()
-{
-    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
-    {
-        Tensor *dst = output(0);
-        ARM_COMPUTE_ERROR_ON(dst == nullptr);
-        dst->desc() = configure_output(0);
-        return true;
-    }
-    return false;
-}
-
-TensorDescriptor FullyConnectedLayerNode::configure_output(size_t idx) const
-{
-    ARM_COMPUTE_UNUSED(idx);
-    const Tensor *src = input(0);
-    ARM_COMPUTE_ERROR_ON(src == nullptr);
-
-    TensorDescriptor output_info  = src->desc();
-    TensorShape      output_shape = compute_output_shape(src->desc().shape, _num_outputs);
-    output_info.shape             = output_shape;
-    return output_info;
-}
-
-Status FullyConnectedLayerNode::validate()
-{
-    return Status{};
-}
-
-NodeType FullyConnectedLayerNode::type() const
-{
-    return NodeType::FullyConnectedLayer;
-}
-
-void FullyConnectedLayerNode::accept(INodeVisitor &v)
-{
-    v.visit(*this);
-}
-} // namespace graph2
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/ReshapeLayer.cpp b/src/graph2/nodes/ReshapeLayer.cpp
deleted file mode 100644
index 6280eea..0000000
--- a/src/graph2/nodes/ReshapeLayer.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "arm_compute/graph2/nodes/ReshapeLayerNode.h"
-
-#include "arm_compute/graph2/Graph.h"
-#include "arm_compute/graph2/INodeVisitor.h"
-
-namespace arm_compute
-{
-namespace graph2
-{
-ReshapeLayerNode::ReshapeLayerNode(TensorShape shape)
-    : _shape(shape)
-{
-    _input_edges.resize(1, EmptyEdgeID);
-    _outputs.resize(1, NullTensorID);
-}
-
-bool ReshapeLayerNode::forward_descriptors()
-{
-    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
-    {
-        Tensor *dst = output(0);
-        ARM_COMPUTE_ERROR_ON(dst == nullptr);
-        dst->desc() = configure_output(0);
-        return true;
-    }
-    return false;
-}
-
-TensorDescriptor ReshapeLayerNode::configure_output(size_t idx) const
-{
-    ARM_COMPUTE_UNUSED(idx);
-    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
-
-    const Tensor *src = input(0);
-    ARM_COMPUTE_ERROR_ON(src == nullptr);
-
-    TensorDescriptor output_desc = src->desc();
-    output_desc.shape            = _shape;
-
-    return output_desc;
-}
-
-Status ReshapeLayerNode::validate()
-{
-    return Status{};
-}
-
-NodeType ReshapeLayerNode::type() const
-{
-    return NodeType::ReshapeLayer;
-}
-
-void ReshapeLayerNode::accept(INodeVisitor &v)
-{
-    v.visit(*this);
-}
-} // namespace graph2
-} // namespace arm_compute
\ No newline at end of file
diff --git a/src/runtime/NEON/functions/NEWinogradLayer.cpp b/src/runtime/NEON/functions/NEWinogradLayer.cpp
index 0ac6d09..0a344f0 100644
--- a/src/runtime/NEON/functions/NEWinogradLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradLayer.cpp
@@ -248,7 +248,7 @@
 Status NEWinogradLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, biases, output);
-    ARM_COMPUTE_RETURN_ERROR_ON(validate_arguments(input, weights, biases, output, conv_info));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info));
 
     return Status{};
 }