COMPMID-793 : Add graph intermediate representation

Change-Id: Ic1685de4e19e0ac79669ef2da64e1dc96c7ea0bf
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/115248
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/SConscript b/SConscript
index 6f7ea3c..dab807a 100644
--- a/SConscript
+++ b/SConscript
@@ -170,6 +170,10 @@
 # CLHarrisCorners uses the Scheduler to run CPP kernels
 runtime_files += Glob('src/runtime/CPP/SingleThreadScheduler.cpp')
 
+# FIXME: Rename graph2 -> graph
+graph2_files = Glob('src/graph2/*.cpp')
+graph2_files += Glob('src/graph2/*/*.cpp')
+
 if env['cppthreads']:
      runtime_files += Glob('src/runtime/CPP/CPPScheduler.cpp')
 
@@ -183,6 +187,9 @@
     runtime_files += Glob('src/runtime/CL/*.cpp')
     runtime_files += Glob('src/runtime/CL/functions/*.cpp')
 
+    graph2_files += Glob('src/graph2/backends/CL/*.cpp')
+
+
 if env['neon']:
     core_files += Glob('src/core/NEON/*.cpp')
     core_files += Glob('src/core/NEON/kernels/*.cpp')
@@ -192,6 +199,8 @@
     core_files += Glob('src/core/NEON/kernels/convolution/winograd/*/*.cpp')
     arm_compute_env.Append(CPPPATH = ["arm_compute/core/NEON/kernels/winograd/", "arm_compute/core/NEON/kernels/assembly/"])
 
+    graph2_files += Glob('src/graph2/backends/NEON/*.cpp')
+
     if env['arch'] == "armv7a":
         core_files += Glob('src/core/NEON/kernels/arm32/*.cpp')
 
@@ -226,6 +235,14 @@
     Depends(arm_compute_so, arm_compute_core_so)
     Export('arm_compute_so')
 
+arm_compute_graph2_a = build_library('arm_compute_graph2-static', graph2_files, static=True, libs = [ arm_compute_a])
+Export('arm_compute_graph2_a')
+
+if env['os'] != 'bare_metal' and not env['standalone']:
+    arm_compute_graph2_so = build_library('arm_compute_graph2', graph2_files, static=False, libs = [ "arm_compute" , "arm_compute_core"])
+    Depends(arm_compute_graph2_so, arm_compute_so)
+    Export('arm_compute_graph2_so')
+
 if env['neon'] and env['opencl']:
     Import('opencl')
     graph_files = Glob('src/graph/*.cpp')
diff --git a/arm_compute/core/Error.h b/arm_compute/core/Error.h
index 56c7ccd..f178936 100644
--- a/arm_compute/core/Error.h
+++ b/arm_compute/core/Error.h
@@ -29,6 +29,16 @@
 
 namespace arm_compute
 {
+/** Ignores unused arguments
+ *
+ * @tparam T Argument types
+ */
+template <typename... T>
+inline void ignore_unused(T &&...)
+{
+}
+
+/** Available error codes */
 enum class ErrorCode
 {
     OK,           /**< No error */
@@ -142,9 +152,9 @@
  * This is useful if for example a variable is only used
  * in debug builds and generates a warning in release builds.
  *
- * @param[in] var Variable which is unused.
+ * @param[in] ... Variables which are unused.
  */
-#define ARM_COMPUTE_UNUSED(var) (void)(var)
+#define ARM_COMPUTE_UNUSED(...) ignore_unused(__VA_ARGS__) // NOLINT
 
 /** Creates an error with a given message
  *
diff --git a/arm_compute/core/FixedPoint.inl b/arm_compute/core/FixedPoint.inl
index 9c7e35a..eb3516e 100644
--- a/arm_compute/core/FixedPoint.inl
+++ b/arm_compute/core/FixedPoint.inl
@@ -22,7 +22,7 @@
  * SOFTWARE.
  */
 #include "arm_compute/core/Error.h"
-#include "arm_compute/core/utils/misc/utility.h"
+#include "arm_compute/core/utils/misc/Utility.h"
 
 #include <cmath>
 #include <limits>
diff --git a/arm_compute/core/Helpers.h b/arm_compute/core/Helpers.h
index 24ba521..1554f63 100644
--- a/arm_compute/core/Helpers.h
+++ b/arm_compute/core/Helpers.h
@@ -33,7 +33,6 @@
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Window.h"
-#include "arm_compute/core/utils/misc/utility.h"
 
 #include <array>
 #include <cstddef>
diff --git a/arm_compute/core/ITensorInfo.h b/arm_compute/core/ITensorInfo.h
index 167fb41..ce0cf53 100644
--- a/arm_compute/core/ITensorInfo.h
+++ b/arm_compute/core/ITensorInfo.h
@@ -30,7 +30,7 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/utils/misc/ICloneable.h"
-#include "arm_compute/core/utils/misc/utility.h"
+#include "arm_compute/core/utils/misc/Utility.h"
 
 #include <cstddef>
 
diff --git a/arm_compute/core/TensorShape.h b/arm_compute/core/TensorShape.h
index dc836c9..d5532e8 100644
--- a/arm_compute/core/TensorShape.h
+++ b/arm_compute/core/TensorShape.h
@@ -26,7 +26,7 @@
 
 #include "arm_compute/core/Dimensions.h"
 #include "arm_compute/core/Error.h"
-#include "arm_compute/core/utils/misc/utility.h"
+#include "arm_compute/core/utils/misc/Utility.h"
 
 #include <algorithm>
 #include <array>
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 143ee02..ae88e60 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1076,5 +1076,5 @@
     DIRECT,  /**< Direct convolution */
     WINOGRAD /**< Convolution using Winograd */
 };
-}
+} // namespace arm_compute
 #endif /* __ARM_COMPUTE_TYPES_H__ */
diff --git a/arm_compute/core/utils/logging/Types.h b/arm_compute/core/utils/logging/Types.h
index 171270d..0b40e3d 100644
--- a/arm_compute/core/utils/logging/Types.h
+++ b/arm_compute/core/utils/logging/Types.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -36,6 +36,7 @@
     VERBOSE, /**< All logging messages */
     INFO,    /**< Information log level */
     WARN,    /**< Warning log level */
+    ERROR,   /**< Error log level */
     OFF      /**< No logging */
 };
 
diff --git a/arm_compute/core/utils/misc/CRTP.h b/arm_compute/core/utils/misc/CRTP.h
new file mode 100644
index 0000000..9947312
--- /dev/null
+++ b/arm_compute/core/utils/misc/CRTP.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_MISC_CRTP_H__
+#define __ARM_COMPUTE_MISC_CRTP_H__
+
+namespace arm_compute
+{
+namespace misc
+{
+/** Curiously recurring template pattern Interface */
+template <typename T, template <typename> class Type>
+struct CRTP
+{
+public:
+    using ExactType = T;
+
+protected:
+    const T &impl() const
+    {
+        return static_cast<const T &>(*this);
+    }
+    T &impl()
+    {
+        return static_cast<T &>(*this);
+    }
+
+private:
+    CRTP() = default;
+    friend Type<T>;
+};
+} // namespace misc
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_MISC_CRTP_H__ */
diff --git a/arm_compute/core/utils/misc/Cast.h b/arm_compute/core/utils/misc/Cast.h
new file mode 100644
index 0000000..f6c91dd
--- /dev/null
+++ b/arm_compute/core/utils/misc/Cast.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_MISC_CAST_H__
+#define __ARM_COMPUTE_MISC_CAST_H__
+
+#include "arm_compute/core/Error.h"
+
+namespace arm_compute
+{
+namespace utils
+{
+namespace cast
+{
+/** Polymorphic cast between two types
+ *
+ * @warning Will throw an exception if cast cannot take place
+ *
+ * @tparam Target Target to cast type
+ * @tparam Source Source from cast type
+ *
+ * @param[in] v Value to cast
+ *
+ * @return The cast value
+ */
+template <typename Target, typename Source>
+inline Target polymorphic_cast(Source *v)
+{
+    if(dynamic_cast<Target>(v) == nullptr)
+    {
+        throw std::bad_cast();
+    }
+    return static_cast<Target>(v);
+}
+
+/** Polymorphic down cast between two types
+ *
+ * @warning Will assert if cannot take place
+ *
+ * @tparam Target Target to cast type
+ * @tparam Source Source from cast type
+ *
+ * @param[in] v Value to cast
+ *
+ * @return The cast value
+ */
+template <typename Target, typename Source>
+inline Target polymorphic_downcast(Source *v)
+{
+    ARM_COMPUTE_ERROR_ON(dynamic_cast<Target>(v) != static_cast<Target>(v));
+    return static_cast<Target>(v);
+}
+} // namespace cast
+} // namespace utils
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_MISC_CAST_H__ */
diff --git a/arm_compute/core/utils/misc/Iterable.h b/arm_compute/core/utils/misc/Iterable.h
new file mode 100644
index 0000000..96a650a
--- /dev/null
+++ b/arm_compute/core/utils/misc/Iterable.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_MISC_ITERABLE_H__
+#define __ARM_COMPUTE_MISC_ITERABLE_H__
+
+#include <iterator>
+
+namespace arm_compute
+{
+namespace utils
+{
+namespace iterable
+{
+/** Reverse range iterable class
+ *
+ * @tparam T Type to create a reverse range on
+ */
+template <typename T>
+class reverse_iterable
+{
+public:
+    /** Default constructor
+     *
+     * @param[in] it Value to reverse iterate on
+     */
+    explicit reverse_iterable(T &it)
+        : _it(it)
+    {
+    }
+
+    typename T::reverse_iterator begin()
+    {
+        return _it.rbegin();
+    }
+
+    typename T::reverse_iterator end()
+    {
+        return _it.rend();
+    }
+
+    typename T::const_reverse_iterator cbegin()
+    {
+        return _it.rbegin();
+    }
+
+    typename T::const_reverse_iterator cend()
+    {
+        return _it.rend();
+    }
+
+private:
+    T &_it;
+};
+
+/** Creates a reverse iterable for a given type
+ *
+ * @tparam T Type to create a reverse iterable on
+ *
+ * @param[in] val Iterable input
+ *
+ * @return Reverse iterable container
+ */
+template <typename T>
+reverse_iterable<T> reverse_iterate(T &val)
+{
+    return reverse_iterable<T>(val);
+}
+} // namespace iterable
+} // namespace utils
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_MISC_ITERABLE_H__ */
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 2919625..354f60d 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -174,7 +174,6 @@
 
     return output_shape;
 }
-
 inline TensorShape compute_fully_connected_reshaped_weights_shape(const ITensorInfo *input, bool transpose_weights, bool is_batched_fc_layer, const int interleave)
 {
     TensorShape output_shape{ input->tensor_shape() };
@@ -194,7 +193,6 @@
 
     return output_shape;
 }
-
 inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &input, const PadStrideInfo &conv_info, const Size2D &kernel_size)
 {
     // Compute height
@@ -212,6 +210,22 @@
 
     return output_shape;
 }
+inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info)
+{
+    const TensorShape input_shape{ input.tensor_shape() };
+    const TensorShape weights_shape{ weights.tensor_shape() };
+
+    unsigned int output_width  = 0;
+    unsigned int output_height = 0;
+    std::tie(output_width, output_height) = scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), conv_info);
+
+    TensorShape output_shape{ input_shape };
+    output_shape.set(0, output_width);
+    output_shape.set(1, output_height);
+    output_shape.set(2, weights_shape[3]);
+
+    return output_shape;
+}
 } // namespace shape_calculator
 } // namespace misc
 } // namespace arm_compute
diff --git a/arm_compute/core/utils/misc/utility.h b/arm_compute/core/utils/misc/Utility.h
similarity index 99%
rename from arm_compute/core/utils/misc/utility.h
rename to arm_compute/core/utils/misc/Utility.h
index 8ba9231..639f2e1 100644
--- a/arm_compute/core/utils/misc/utility.h
+++ b/arm_compute/core/utils/misc/Utility.h
@@ -164,7 +164,6 @@
 
     return idx;
 }
-
 } // namespace utility
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_MISC_UTILITY_H__ */
diff --git a/arm_compute/core/utils/strong_type/StrongType.h b/arm_compute/core/utils/strong_type/StrongType.h
new file mode 100644
index 0000000..5a38edb
--- /dev/null
+++ b/arm_compute/core/utils/strong_type/StrongType.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_STRONG_TYPE_STRONG_TYPE_H__
+#define __ARM_COMPUTE_STRONG_TYPE_STRONG_TYPE_H__
+
+#include <type_traits>
+
+namespace arm_compute
+{
+namespace strong_type
+{
+/** Strong type
+ *
+ * @tparam T           Exact type of the Strong Type
+ * @tparam Tag         Tag used to distinguish between types with same T
+ * @tparam Attributes  Attributes of the Strong Type
+ */
+template <typename T, typename Tag, template <typename> class... Attributes>
+class StrongType : public Attributes<StrongType<T, Tag, Attributes...>>...
+{
+public:
+    /** Exact underlying type */
+    using ExactType = T;
+
+public:
+    /** Default Constructor
+     *
+     * @param[in] val Initialization value
+     */
+    StrongType(T val)
+        : _val(val)
+    {
+    }
+    /** Accessor of the value of the exact type
+     *
+     * @return Exact type value
+     */
+    T &get()
+    {
+        return _val;
+    }
+    /** Accessor of the value of the exact type
+     *
+     * @return Exact type value
+     */
+    const T &get() const
+    {
+        return _val;
+    }
+
+private:
+    T _val = {};
+};
+} // namespace strong_type
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_STRONG_TYPE_STRONG_TYPE_H__ */
diff --git a/arm_compute/core/utils/strong_type/StrongTypeAttributes.h b/arm_compute/core/utils/strong_type/StrongTypeAttributes.h
new file mode 100644
index 0000000..b5ed48f
--- /dev/null
+++ b/arm_compute/core/utils/strong_type/StrongTypeAttributes.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_STRONG_TYPE_STRONG_TYPE_ATTRIBUTES_H__
+#define __ARM_COMPUTE_STRONG_TYPE_STRONG_TYPE_ATTRIBUTES_H__
+
+#include "arm_compute/core/utils/misc/CRTP.h"
+
+namespace arm_compute
+{
+namespace strong_type
+{
+/** Comparable attribute */
+template <typename T>
+struct Comparable : misc::CRTP<T, Comparable>
+{
+    bool operator==(T const &other) const
+    {
+        return this->impl().get() == other.get();
+    }
+    bool operator!=(T const &other) const
+    {
+        return !(*this == other);
+    }
+    bool operator>(T const &other) const
+    {
+        return this->impl().get() > other.get();
+    }
+    bool operator<(T const &other) const
+    {
+        return this->impl().get() < other.get();
+    }
+    bool operator>=(T const &other) const
+    {
+        return !(*this < other);
+    }
+    bool operator<=(T const &other) const
+    {
+        return !(*this > other);
+    }
+};
+} // namespace strong_type
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_STRONG_TYPE_STRONG_TYPE_ATTRIBUTES_H__ */
diff --git a/arm_compute/graph/nodes/BranchLayer.h b/arm_compute/graph/nodes/BranchLayer.h
index 5e4a8d9..cbc016d 100644
--- a/arm_compute/graph/nodes/BranchLayer.h
+++ b/arm_compute/graph/nodes/BranchLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,7 +31,7 @@
 #include "arm_compute/graph/SubTensor.h"
 #include "arm_compute/graph/Types.h"
 
-#include "arm_compute/core/utils/misc/utility.h"
+#include "arm_compute/core/utils/misc/Utility.h"
 
 #include <vector>
 
diff --git a/arm_compute/graph/nodes/ResidualLayer.h b/arm_compute/graph/nodes/ResidualLayer.h
index 1eecf6f..27e0501 100644
--- a/arm_compute/graph/nodes/ResidualLayer.h
+++ b/arm_compute/graph/nodes/ResidualLayer.h
@@ -30,7 +30,7 @@
 #include "arm_compute/graph/SubGraph.h"
 #include "arm_compute/graph/Types.h"
 
-#include "arm_compute/core/utils/misc/utility.h"
+#include "arm_compute/core/utils/misc/Utility.h"
 
 #include <vector>
 
diff --git a/arm_compute/graph2.h b/arm_compute/graph2.h
new file mode 100644
index 0000000..5f54990
--- /dev/null
+++ b/arm_compute/graph2.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_H__
+#define __ARM_COMPUTE_GRAPH2_H__
+
+// IR
+#include "arm_compute/graph2/Edge.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/GraphBuilder.h"
+#include "arm_compute/graph2/IDeviceBackend.h"
+#include "arm_compute/graph2/IGraphMutator.h"
+#include "arm_compute/graph2/IGraphPrinter.h"
+#include "arm_compute/graph2/INode.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/Tensor.h"
+#include "arm_compute/graph2/TensorDescriptor.h"
+#include "arm_compute/graph2/TypePrinter.h"
+#include "arm_compute/graph2/Types.h"
+
+// Algorithms
+#include "arm_compute/graph2/algorithms/Algorithms.h"
+#include "arm_compute/graph2/mutators/GraphMutators.h"
+#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/graph2/printers/Printers.h"
+
+// Frontend
+#include "arm_compute/graph2/frontend/IStreamOperators.h"
+#include "arm_compute/graph2/frontend/Layers.h"
+#include "arm_compute/graph2/frontend/Stream.h"
+#include "arm_compute/graph2/frontend/SubStream.h"
+#include "arm_compute/graph2/frontend/Types.h"
+
+#endif /* __ARM_COMPUTE_GRAPH2_H__ */
diff --git a/arm_compute/graph2/Edge.h b/arm_compute/graph2/Edge.h
new file mode 100644
index 0000000..e82bcb2
--- /dev/null
+++ b/arm_compute/graph2/Edge.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_EDGE_H__
+#define __ARM_COMPUTE_GRAPH2_EDGE_H__
+
+#include "arm_compute/graph2/INode.h"
+#include "arm_compute/graph2/Tensor.h"
+#include "arm_compute/graph2/Types.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class Graph;
+
+/** Graph Edge */
+class Edge final
+{
+public:
+    /** Default Constructor
+     *
+     * @param[in] id           Edge id
+     * @param[in] producer     Producer node id
+     * @param[in] producer_idx Producer node output index
+     * @param[in] consumer     Consumer node id
+     * @param[in] consumer_idx Consumer node input index
+     * @param[in] tensor       Tensor associated with the edge
+     */
+    Edge(EdgeID id, INode *producer, unsigned int producer_idx, INode *consumer, unsigned int consumer_idx, Tensor *tensor)
+        : _id(id), _producer(producer), _consumer(consumer), _producer_idx(producer_idx), _consumer_idx(consumer_idx), _tensor(tensor)
+
+    {
+    }
+    /** Returns edge id
+     *
+     * @return Edge id
+     */
+    EdgeID id() const
+    {
+        return _id;
+    }
+    /** Returns producer node id
+     *
+     * @return Producer node id
+     */
+    NodeID producer_id() const
+    {
+        return (_producer == nullptr) ? EmptyNodeID : _producer->id();
+    }
+    /** Returns consumer node id
+     *
+     * @return Consumer node id
+     */
+    NodeID consumer_id() const
+    {
+        return (_consumer == nullptr) ? EmptyNodeID : _consumer->id();
+    }
+    /** Returns producer node
+     *
+     * @return Producer node
+     */
+    INode *producer() const
+    {
+        return _producer;
+    }
+    /** Returns consumer node
+     *
+     * @return Consumer node
+     */
+    INode *consumer() const
+    {
+        return _consumer;
+    }
+    /** Returns the index of the output that produces the result in the producer node
+     *
+     * @return Producer node output index
+     */
+    unsigned int producer_idx() const
+    {
+        return _producer_idx;
+    }
+    /** Returns the index of the input that consumes the result in the consumer node
+     *
+     * @return Consumer node input index
+     */
+    unsigned int consumer_idx() const
+    {
+        return _consumer_idx;
+    }
+    /** Returns the tensor associated with this edge
+     *
+     * @return Tensor pointer
+     */
+    Tensor *tensor() const
+    {
+        return _tensor;
+    }
+    /** Returns the tensor id associated with this edge
+     *
+     * @return Tensor id
+     */
+    TensorID tensor_id() const
+    {
+        return (_tensor == nullptr) ? NullTensorID : _tensor->id();
+    }
+    /** Bind the edge to another tensor
+     *
+     * @note If tensor is nullptr then nothing happens
+     *
+     * @param[in] tensor Tensor to bind the edge to
+     */
+    void update_bound_tensor(Tensor *tensor)
+    {
+        _tensor = (tensor != nullptr) ? tensor : _tensor;
+    }
+
+private:
+    friend class Graph;
+
+private:
+    EdgeID       _id;
+    INode       *_producer;
+    INode       *_consumer;
+    unsigned int _producer_idx;
+    unsigned int _consumer_idx;
+    Tensor      *_tensor;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_EDGE_H__ */
diff --git a/arm_compute/graph2/Graph.h b/arm_compute/graph2/Graph.h
new file mode 100644
index 0000000..a601598
--- /dev/null
+++ b/arm_compute/graph2/Graph.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_GRAPH_H__
+#define __ARM_COMPUTE_GRAPH2_GRAPH_H__
+
+#include "arm_compute/graph2/Edge.h"
+#include "arm_compute/graph2/INode.h"
+#include "arm_compute/graph2/Tensor.h"
+#include "arm_compute/graph2/Types.h"
+
+#include "support/Mutex.h"
+#include "support/ToolchainSupport.h"
+
+#include <map>
+#include <memory>
+#include <string>
+#include <thread>
+#include <utility>
+#include <vector>
+
+namespace arm_compute
+{
+namespace graph2
+{
+/** Graph class
+ *
+ * Represents a multiple source - multiple sink directed graph
+ */
+class Graph final
+{
+public:
+    Graph() = default;
+    /** Constructor
+     *
+     * @param[in] id   Graph identification number. Can be used to differentiate between graphs. Default value 0
+     * @param[in] name Graph name. Default value empty string
+     */
+    Graph(GraphID id, std::string name);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    Graph(const Graph &) = delete;
+    /** Prevent instances of this class from being copy assigned (As this class contains pointers) */
+    Graph &operator=(const Graph &) = delete;
+    /** Allow instances of this class to be moved */
+    Graph(Graph &&) = default;
+    /** Allow instances of this class to be move assigned */
+    Graph &operator=(Graph &&) = default;
+    /** Adds a node to the graph
+     *
+     * @note Models a single output node
+     *
+     * @tparam NT Node operation
+     * @tparam Ts Arguments to operation
+     *
+     * @param args Node arguments
+     *
+     * @return ID of the node
+     */
+    template <typename NT, typename... Ts>
+    NodeID add_node(Ts &&... args);
+    /** Remove the node with the given ID
+     *
+     * @param[in] nid ID of the node to remove
+     *
+     * @return True if the removal took place else false
+     */
+    bool remove_node(NodeID nid);
+    /** Adds a connection between two nodes
+     *
+     * @param[in] source     ID of the source node
+     * @param[in] source_idx Output index of the source node
+     * @param[in] sink       ID of the sink node
+     * @param[in] sink_idx   Input index of the sink node
+     *
+     * @return ID of this connection
+     */
+    EdgeID add_connection(NodeID source, size_t source_idx, NodeID sink, size_t sink_idx);
+    /** Removes an edge (connection)
+     *
+     * @param[in] eid Connection to remove
+     *
+     * @return True if the removal took place else false
+     */
+    bool remove_connection(EdgeID eid);
+    /** Returns graph name
+     *
+     * @return Graph name
+     */
+    std::string name() const;
+    /** Returns graph id
+     *
+     * @return Graph id
+     */
+    GraphID id() const;
+    /** Returns graph input nodes
+     *
+     * @return vector containing the graph inputs
+     */
+    const std::vector<NodeID> &inputs();
+    /** Returns nodes of graph
+     *
+     * @warning Nodes can be nullptr if they have been removed during the mutation steps of the graph
+     *
+     * @return Nodes of graph
+     */
+    std::vector<std::unique_ptr<INode>> &nodes();
+    /** Returns nodes of graph
+     *
+     * @warning Nodes can be nullptr if they have been removed during the mutation steps of the graph
+     *
+     * @return Nodes of graph
+     */
+    const std::vector<std::unique_ptr<INode>> &nodes() const;
+    /** Returns edges of graph
+     *
+     * @warning Edges can be nullptr if they have been removed during the mutation steps of the graph
+     *
+     * @return Edges of graph
+     */
+    const std::vector<std::unique_ptr<Edge>> &edges() const;
+    /** Returns tensors of graph
+     *
+     * @warning Tensor can be nullptr if they have been removed during the mutation steps of the graph
+     *
+     * @return Tensors of graph
+     */
+    std::vector<std::unique_ptr<Tensor>> &tensors();
+    /** Returns tensors of graph
+     *
+     * @warning Tensor can be nullptr if they have been removed during the mutation steps of the graph
+     *
+     * @return Tensors of graph
+     */
+    const std::vector<std::unique_ptr<Tensor>> &tensors() const;
+    /** Get node object given its id
+     *
+     * @warning Can be nullptr if node was removed during the mutation steps of the graph
+     *
+     * @param[in] id Node ID
+     *
+     * @return The actual node object
+     */
+    const INode *node(NodeID id) const;
+    /** Get node object given its id
+     *
+     * @warning Can be nullptr if node was removed during the mutation steps of the graph
+     *
+     * @param[in] id Node ID
+     *
+     * @return The actual node object
+     */
+    INode *node(NodeID id);
+    /** Get edge object given its id
+     *
+     * @warning Can be nullptr if node was removed during the mutation steps of the graph
+     *
+     * @param[in] id Edge ID
+     *
+     * @return The actual edge object
+     */
+    const Edge *edge(EdgeID id) const;
+    /** Get edge object given its id
+     *
+     * @warning Can be nullptr if node was removed during the mutation steps of the graph
+     *
+     * @param[in] id Edge ID
+     *
+     * @return The actual edge object
+     */
+    Edge *edge(EdgeID id);
+    /** Get tensor object given its id
+     *
+     * @warning Can be nullptr if tensor was removed during the mutation steps of the graph
+     *
+     * @param[in] id Tensor ID
+     *
+     * @return The actual tensor object
+     */
+    const Tensor *tensor(TensorID id) const;
+    /** Get tensor object given its id
+     *
+     * @warning Can be nullptr if tensor was removed during the mutation steps of the graph
+     *
+     * @param[in] id Tensor ID
+     *
+     * @return The actual tensor object
+     */
+    Tensor *tensor(TensorID id);
+
+private:
+    /** Creates a tensor object
+     *
+     * @param[in] desc Tensor descriptor
+     *
+     * @return Tensor ID
+     */
+    TensorID create_tensor(TensorDescriptor desc = TensorDescriptor());
+
+private:
+    GraphID                              _id      = GraphID(0); /**< Graph id */
+    std::string                          _name    = {};         /**< Graph name */
+    std::vector<std::unique_ptr<INode>>  _nodes   = {};         /**< Graph nodes */
+    std::vector<std::unique_ptr<Edge>>   _edges   = {};         /**< Graph edges */
+    std::vector<std::unique_ptr<Tensor>> _tensors = {};         /**< Graph tensors */
+    std::map<NodeType, std::vector<NodeID>> _tagged_nodes = {}; /**< Graph nodes map with the node type as key */
+    arm_compute::Mutex _mtx = {};                               /**< Mutex used for graph construction */
+};
+
+template <typename NT, typename... Ts>
+inline NodeID Graph::add_node(Ts &&... args)
+{
+    std::lock_guard<arm_compute::Mutex> lock(_mtx);
+
+    // Create node
+    NodeID nid  = _nodes.size();
+    auto   node = support::cpp14::make_unique<NT>(std::forward<Ts>(args)...);
+    node->set_graph(this);
+    node->set_id(nid);
+
+    // Keep track of input nodes
+    if(node->type() == NodeType::Input)
+    {
+        _tagged_nodes[NodeType::Input].push_back(nid);
+    }
+
+    // Associate a new tensor with each output
+    for(auto &output : node->_outputs)
+    {
+        output = create_tensor();
+    }
+
+    // Propagate node shape if possible
+    node->forward_descriptors();
+
+    // Add node to the graph nodes
+    _nodes.push_back(std::move(node));
+
+    return nid;
+}
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_GRAPH_H__ */
diff --git a/arm_compute/graph2/GraphBuilder.h b/arm_compute/graph2/GraphBuilder.h
new file mode 100644
index 0000000..f92746a
--- /dev/null
+++ b/arm_compute/graph2/GraphBuilder.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_GRAPH_BUILDER_H__
+#define __ARM_COMPUTE_GRAPH2_GRAPH_BUILDER_H__
+
+#include "arm_compute/graph2/ITensorAccessor.h"
+#include "arm_compute/graph2/Types.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declaration
+class Graph;
+
+/** Graph builder class
+ *
+ * Builds and compiles a graph
+ */
+class GraphBuilder final
+{
+public:
+    /** Adds a Const node to the graph
+     *
+     * @param[in] g        Graph to add the node to
+     * @param[in] params   Common node parameters
+     * @param[in] desc     Tensor descriptor of the node
+     * @param[in] accessor (Optional) Accessor of the const node data
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_const_node(Graph &g, NodeParams params, TensorDescriptor desc, ITensorAccessorUPtr accessor = nullptr);
+    /** Adds an input layer node to the graph
+     *
+     * @param[in] g        Graph to add the node to
+     * @param[in] params   Common node parameters
+     * @param[in] desc     Tensor descriptor of the Tensor
+     * @param[in] accessor (Optional) Accessor of the input node data
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_input_node(Graph &g, NodeParams params, TensorDescriptor desc, ITensorAccessorUPtr accessor = nullptr);
+    /** Adds an output layer node to the graph
+     *
+     * @param[in] g        Graph to add the node to
+     * @param[in] params   Common node parameters
+     * @param[in] input    Input to the output node as a NodeID-Index pair
+     * @param[in] accessor (Optional) Accessor of the output node data
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_output_node(Graph &g, NodeParams params, NodeIdxPair input, ITensorAccessorUPtr accessor = nullptr);
+    /** Adds an activation layer node to the graph
+     *
+     * @param[in] g        Graph to add the node to
+     * @param[in] params   Common node parameters
+     * @param[in] input    Input to the activation layer node as a NodeID-Index pair
+     * @param[in] act_info Activation layer information
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_activation_node(Graph &g, NodeParams params, NodeIdxPair input, ActivationLayerInfo act_info);
+    /** Adds a batch normalization layer node to the graph
+     *
+     * @param[in] g              Graph to add the node to
+     * @param[in] params         Common node parameters
+     * @param[in] input          Input to the batch normalization layer node as a NodeID-Index pair
+     * @param[in] epsilon        Epsilon parameter
+     * @param[in] mean_accessor  (Optional) Accessor of the mean values
+     * @param[in] var_accessor   (Optional) Accessor of the variance values
+     * @param[in] beta_accessor  (Optional) Accessor of the beta values. Can be nullptr
+     * @param[in] gamma_accessor (Optional) Accessor of the gamma values. Can be nullptr
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_batch_normalization_node(Graph &g, NodeParams params, NodeIdxPair input, float epsilon,
+                                               ITensorAccessorUPtr mean_accessor = nullptr, ITensorAccessorUPtr var_accessor = nullptr,
+                                               ITensorAccessorUPtr beta_accessor = nullptr, ITensorAccessorUPtr gamma_accessor = nullptr);
+    /** Adds a convolution layer node to the graph
+     *
+     * @param[in] g                     Graph to add the node to
+     * @param[in] params                Common node parameters
+     * @param[in] input                 Input to the convolution layer node as a NodeID-Index pair
+     * @param[in] kernel_spatial_extend Spatial extent of convolution kernels
+     * @param[in] depth                 Number of convolution kernels
+     * @param[in] conv_info             Convolution layer information
+     * @param[in] method                (Optional) Convolution method to use
+     * @param[in] weights_accessor      (Optional) Accessor of the weights node data
+     * @param[in] bias_accessor         (Optional) Accessor of the bias node data
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
+                                       Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo conv_info,
+                                       ConvolutionMethod   method           = ConvolutionMethod::DEFAULT,
+                                       ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr);
+    /** Adds a depth concatenate node to the graph
+     *
+     * @param[in] g      Graph to add the node to
+     * @param[in] params Common node parameters
+     * @param[in] inputs Inputs to the depth concatenate layer node as NodeID-Index pairs
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_depth_concatenate_node(Graph &g, NodeParams params, std::vector<NodeIdxPair> inputs);
+    /** Adds a depth-wise convolution layer node to the graph
+     *
+     * @param[in] g                     Graph to add the node to
+     * @param[in] params                Common node parameters
+     * @param[in] input                 Input to the depthwise convolution layer node as a NodeID-Index pair
+     * @param[in] kernel_spatial_extend Spatial extent of convolution kernels
+     * @param[in] conv_info             Convolution layer information
+     * @param[in] method                (Optional) Convolution method to use
+     * @param[in] weights_accessor      (Optional) Accessor of the weights node data
+     * @param[in] bias_accessor         (Optional) Accessor of the bias node data
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_depthwise_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
+                                                 Size2D kernel_spatial_extend, PadStrideInfo conv_info,
+                                                 DepthwiseConvolutionMethod method    = DepthwiseConvolutionMethod::DEFAULT,
+                                                 ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr);
+    /** Adds an element-wise layer node to the graph
+     *
+     * @param[in] g         Graph to add the node to
+     * @param[in] params    Common node parameters
+     * @param[in] input0    First input to the element-wise operation layer node as a NodeID-Index pair
+     * @param[in] input1    Second input to the element-wise operation layer node as a NodeID-Index pair
+     * @param[in] operation Element-wise operation to perform
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_elementwise_node(Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, EltwiseOperation operation);
+    /** Adds a flatten layer node to the graph
+     *
+     * @param[in] g      Graph to add the node to
+     * @param[in] params Common node parameters
+     * @param[in] input  Input to the flatten layer node as a NodeID-Index pair
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_flatten_node(Graph &g, NodeParams params, NodeIdxPair input);
+    /** Adds a fully connected layer node to the graph
+     *
+     * @param[in] g                Graph to add the layer to
+     * @param[in] params           Common node parameters
+     * @param[in] input            Input to the fully connected layer node as a NodeID-Index pair
+     * @param[in] num_outputs      Number of output neurons
+     * @param[in] weights_accessor (Optional) Accessor of the weights node data
+     * @param[in] bias_accessor    (Optional) Accessor of the bias node data
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_fully_connected_layer(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_outputs,
+                                            ITensorAccessorUPtr weights_accessor = nullptr, ITensorAccessorUPtr bias_accessor = nullptr);
+    /** Adds a normalization layer node to the graph
+     *
+     * @param[in] g         Graph to add the node to
+     * @param[in] params    Common node parameters
+     * @param[in] input     Input to the normalization layer node as a NodeID-Index pair
+     * @param[in] norm_info Normalization layer information
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_normalization_node(Graph &g, NodeParams params, NodeIdxPair input, NormalizationLayerInfo norm_info);
+    /** Adds a pooling layer node to the graph
+     *
+     * @param[in] g         Graph to add the node to
+     * @param[in] params    Common node parameters
+     * @param[in] input     Input to the pooling layer node as a NodeID-Index pair
+     * @param[in] pool_info Pooling layer information
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_pooling_node(Graph &g, NodeParams params, NodeIdxPair input, PoolingLayerInfo pool_info);
+    /** Adds a reshape layer node to the graph
+     *
+     * @param[in] g      Graph to add the node to
+     * @param[in] params Common node parameters
+     * @param[in] input  Input to the reshape layer node as a NodeID-Index pair
+     * @param[in] shape  Output reshaped shape
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_reshape_node(Graph &g, NodeParams params, NodeIdxPair input, TensorShape shape);
+    /** Adds a softmax node to the graph
+     *
+     * @param[in] g      Graph to add the node to
+     * @param[in] params Common node parameters
+     * @param[in] input  Input to the softmax layer node as a NodeID-Index pair
+     * @param[in] beta   Beta parameter
+     *
+     * @return Node ID of the created node, EmptyNodeID in case of error
+     */
+    static NodeID add_softmax_node(Graph &g, NodeParams params, NodeIdxPair input, float beta = 1.f);
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_GRAPH_BUILDER_H__ */
diff --git a/arm_compute/graph2/GraphContext.h b/arm_compute/graph2/GraphContext.h
new file mode 100644
index 0000000..bd579ea
--- /dev/null
+++ b/arm_compute/graph2/GraphContext.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_GRAPH_CONTEXT_H__
+#define __ARM_COMPUTE_GRAPH2_GRAPH_CONTEXT_H__
+
+#include "arm_compute/graph2/Types.h"
+
+#include "arm_compute/runtime/IMemoryManager.h"
+
+#include <map>
+#include <memory>
+
+namespace arm_compute
+{
+namespace graph2
+{
+/** Contains structs required for memory management */
+struct MemoryManagerContext
+{
+    Target                                       target = { Target::UNSPECIFIED };
+    std::shared_ptr<arm_compute::IMemoryManager> mm     = { nullptr };
+};
+
+/** Graph context **/
+class GraphContext final
+{
+public:
+    /** Constructor */
+    GraphContext();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    GraphContext(const GraphContext &) = delete;
+    /** Default move constructor */
+    GraphContext(GraphContext &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    GraphContext &operator=(const GraphContext &) = delete;
+    /** Default move assignment operator */
+    GraphContext &operator=(GraphContext &&) = default;
+    /** Enables tuning
+     *
+     * @param[in] enable_tuning Enables tuning if true
+     */
+    void enable_tuning(bool enable_tuning);
+    /** Checks if tuning is enabled
+     *
+     * @return True if tuning is enabled else false
+     */
+    bool is_tuning_enabled() const;
+    /** Enables memory management
+     *
+     * @param[in] enable_mm Enables memory management if true
+     */
+    void enable_memory_managenent(bool enable_mm);
+    /** Checks if memory management is enabled
+     *
+     * @return True if memory management is enabled else false
+     */
+    bool is_memory_management_enabled();
+    /** Inserts a memory manager context
+     *
+     * @param[in] memory_ctx Memory manager context
+     *
+     * @return True if the insertion succeeded else false
+     */
+    bool insert_memory_management_ctx(MemoryManagerContext &&memory_ctx);
+    /** Gets a memory manager context for a given target
+     *
+     * @param[in] target Target for which to retrieve the memory management context
+     *
+     * @return Management context for the target if exists else nullptr
+     */
+    MemoryManagerContext *memory_management_ctx(Target target);
+    /** Finalizes memory managers in graph context */
+    void finalize();
+
+private:
+    bool _tunable;                                           /**< Specifies if the Graph should use a tunable object */
+    bool _memory_managed;                                    /**< Specifies if the Graph should use a memory manager */
+    std::map<Target, MemoryManagerContext> _memory_managers; /**< Memory managers for each target */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_GRAPH_CONTEXT_H__ */
diff --git a/arm_compute/graph2/GraphManager.h b/arm_compute/graph2/GraphManager.h
new file mode 100644
index 0000000..0d5835f
--- /dev/null
+++ b/arm_compute/graph2/GraphManager.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_GRAPH_MANAGER_H__
+#define __ARM_COMPUTE_GRAPH2_GRAPH_MANAGER_H__
+
+#include "arm_compute/graph2/Types.h"
+#include "arm_compute/graph2/Workload.h"
+
+#include <map>
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declaration
+class Graph;
+class GraphContext;
+class PassManager;
+
+/** Graph manager class
+ *
+ * Manages a list of graphs along with their resources
+ */
+class GraphManager final
+{
+public:
+    /** Default Constructor **/
+    GraphManager();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    GraphManager(const GraphManager &) = delete;
+    /** Default move constructor */
+    GraphManager(GraphManager &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    GraphManager &operator=(const GraphManager &) = delete;
+    /** Default move assignment operator */
+    GraphManager &operator=(GraphManager &&) = default;
+    /** Finalizes a given graph
+     *
+     * @warning At this given time finalize_graph will alter the passed graph,
+     *          plan is to avoid by copying the graph structure,
+     *          or provide another entry-point for this functionality as it will increase the memory requirements
+     *
+     * @param[in] graph  Graph to finalize
+     * @param[in] ctx    Graph context
+     * @param[in] pm     Pass manager to use for any optimization passes
+     * @param[in] target Execution target (Single target execution is currently supported)
+     */
+    void finalize_graph(Graph &graph, GraphContext &ctx, PassManager &pm, Target target);
+    /** Executes a graph
+     *
+     * @param[in] graph Graph to execute
+     */
+    void execute_graph(Graph &graph);
+    /** Invalidates the graph execution workload
+     *
+     * @param[in] graph Graph to invalidate
+     */
+    void invalidate_graph(Graph &graph);
+
+private:
+    std::map<GraphID, ExecutionWorkload> _workloads = {}; /**< Graph workloads */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_GRAPH_MANAGER_H__ */
diff --git a/arm_compute/graph2/IDeviceBackend.h b/arm_compute/graph2/IDeviceBackend.h
new file mode 100644
index 0000000..771ff85
--- /dev/null
+++ b/arm_compute/graph2/IDeviceBackend.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_IDEVICEBACKEND_H__
+#define __ARM_COMPUTE_GRAPH2_IDEVICEBACKEND_H__
+
+#include "arm_compute/graph2/ITensorHandle.h"
+#include "arm_compute/graph2/Types.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class Graph;
+class GraphContext;
+class Tensor;
+class INode;
+
+namespace backends
+{
+/** Device backend interface */
+class IDeviceBackend
+{
+public:
+    /** Virtual Destructor */
+    virtual ~IDeviceBackend() = default;
+    /** Initializes the backend */
+    virtual void initialize_backend() = 0;
+    /** Setups the given graph context
+     *
+     * @param[in] ctx Graph context
+     */
+    virtual void setup_backend_context(GraphContext &ctx) = 0;
+    /** Create a backend Tensor
+     *
+     * @param[in] tensor The tensor we want to create a backend tensor for
+     *
+     * @return Backend tensor handle
+     */
+    virtual std::unique_ptr<ITensorHandle> create_tensor(const Tensor &tensor) = 0;
+    /** Create a backend Sub-Tensor
+     *
+     * @param[in] parent Parent tensor handle
+     * @param[in] shape  Shape of the sub-tensor
+     * @param[in] coords Starting coordinates of the sub-tensor
+     *
+     * @return Backend sub-tensor handle
+     */
+    virtual std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords) = 0;
+    /** Configure a backend Node
+     *
+     * @note This creates an appropriate configured backend function for the given node
+     *
+     * @param[in] node The node we want to configure
+     * @param[in] ctx  Context to use
+     *
+     * @return Backend execution function
+     */
+    virtual std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) = 0;
+    /** Validate a node
+     *
+     * @param[in] node The node we want to validate
+     *
+     * @return An error status
+     */
+    virtual Status validate_node(const INode &node) = 0;
+    /** Create a backend memory manager given its affinity
+     *
+     * @param[in] affinity Memory Manager affinity
+     *
+     * @return Memory manager
+     */
+    virtual std::shared_ptr<arm_compute::IMemoryManager> create_memory_manager(MemoryManagerAffinity affinity) = 0;
+};
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif //__ARM_COMPUTE_GRAPH2_IDEVICEBACKEND_H__
diff --git a/arm_compute/graph2/IGraphMutator.h b/arm_compute/graph2/IGraphMutator.h
new file mode 100644
index 0000000..50151c8
--- /dev/null
+++ b/arm_compute/graph2/IGraphMutator.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_IGRAPHMUTATOR_H__
+#define __ARM_COMPUTE_GRAPH2_IGRAPHMUTATOR_H__
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class Graph;
+
+/** Graph mutator interface */
+class IGraphMutator
+{
+public:
+    /** Virtual Destructor */
+    virtual ~IGraphMutator() = default;
+    /** Walk the graph and perform a specific mutation
+     *
+     * @param[in, out] g Graph to walk and mutate
+     */
+    virtual void mutate(Graph &g) = 0;
+    /** Returns mutator name
+     *
+     * @return Mutator name
+     */
+    virtual const char *name() = 0;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_IGRAPHMUTATOR_H__ */
diff --git a/arm_compute/graph2/IGraphPrinter.h b/arm_compute/graph2/IGraphPrinter.h
new file mode 100644
index 0000000..eb85f97
--- /dev/null
+++ b/arm_compute/graph2/IGraphPrinter.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_IGRAPHPRINTER_H__
+#define __ARM_COMPUTE_GRAPH2_IGRAPHPRINTER_H__
+
+#include <ostream>
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class Graph;
+
+/** Graph printer interface */
+class IGraphPrinter
+{
+public:
+    /** Virtual Destructor */
+    virtual ~IGraphPrinter() = default;
+    /** Print graph
+     *
+     * @param[in]  g  Graph to print
+     * @param[out] os Output stream
+     */
+    virtual void print(const Graph &g, std::ostream &os) = 0;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_IGRAPHPRINTER_H__ */
diff --git a/arm_compute/graph2/INode.h b/arm_compute/graph2/INode.h
new file mode 100644
index 0000000..ba5620b
--- /dev/null
+++ b/arm_compute/graph2/INode.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_INODE_H__
+#define __ARM_COMPUTE_GRAPH2_INODE_H__
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/graph2/TensorDescriptor.h"
+#include "arm_compute/graph2/Types.h"
+
+#include <set>
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class Graph;
+class Edge;
+class INodeVisitor;
+class Tensor;
+
+/** Node interface */
+class INode
+{
+public:
+    /** Constructor */
+    INode();
+    /** Destructor **/
+    virtual ~INode() = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    INode(const INode &) = delete;
+    /** Prevent instances of this class from being copy assigned (As this class contains pointers) */
+    INode &operator=(const INode &) = delete;
+    /** Allow instances of this class to be moved */
+    INode(INode &&) = default;
+    /** Allow instances of this class to be move assigned */
+    INode &operator=(INode &&) = default;
+    /** Validate node
+     *
+     * @return Status containing any errors
+     */
+    virtual Status validate() = 0;
+    /** Returns node's type
+     *
+     * @return Node's type
+     */
+    virtual NodeType type() const = 0;
+    /** Accepts a node visitor
+     *
+     * @param[in] v Visitor to accept
+     */
+    virtual void accept(INodeVisitor &v) = 0;
+    /** Forwards descriptor information to outputs if possible
+     *
+     * @return True if descriptor information could be forwarded otherwise false
+     */
+    virtual bool forward_descriptors() = 0;
+    /** Calculates output configuration
+     *
+     * @param[in] idx Output index to configure
+     *
+     * @return Output descriptor configuration
+     */
+    virtual TensorDescriptor configure_output(size_t idx) const = 0;
+    /** Returns node's name
+     *
+     * @return Node name
+     */
+    std::string name() const;
+    /** Returns node's ID
+     *
+     * @return Node's ID
+     */
+    NodeID id() const;
+    /** Returns node's Graph
+     *
+     * @return Node's graph
+     */
+    const Graph *graph() const;
+    /** Returns node's Graph
+     *
+     * @return Node's graph
+     */
+    Graph *graph();
+    /** Sets the graph that this node is registered to
+     *
+     * @param[in] g Back reference to graph
+     */
+    void set_graph(Graph *g);
+    /** Sets the node id
+     *
+     * @param[in] id Node id
+     */
+    void set_id(NodeID id);
+    /** Sets common node parameters
+     *
+     * @param[in] common_params Common node parameters to set
+     */
+    void set_common_node_parameters(NodeParams common_params);
+    /** Sets target preference
+     *
+     * @note This is not the target that the graph executor might choose, it's just an indication
+     *
+     * @param[in] target Target preference
+     */
+    void set_requested_target(Target target);
+    /** Sets the final execution target
+     *
+     * @note GraphManager might change this target
+     *
+     * @param[in] target Final execution target
+     */
+    void set_assigned_target(Target target);
+    /** Sets the output tensor of at a given index
+     *
+     * @note All edges will get updated
+     *
+     * @param[in] tid Tensor ID
+     * @param[in] idx Output index
+     */
+    void set_output_tensor(TensorID tid, size_t idx);
+    /** Returns inputs of the node
+     *
+     * @return Inputs of the node
+     */
+    const std::vector<TensorID> &inputs() const;
+    /** Returns outputs of the node
+     *
+     * @return Outputs of the node
+     */
+    const std::vector<TensorID> &outputs() const;
+    /** Returns input edge set
+     *
+     * @return Set of input edges
+     */
+    const std::vector<EdgeID> &input_edges() const;
+    /** Returns output edge set
+     *
+     * @return Set of output edges
+     */
+    const std::set<EdgeID> &output_edges() const;
+    /** Returns the tensor ID of a given input of the node
+     *
+     * @note Precondition : idx should be a valid input index
+     *
+     * @param[in] idx Index of the node input
+     *
+     * @return TensorID of the requested input
+     */
+    TensorID input_id(size_t idx) const;
+    /** Returns the tensor ID of a given output of the node
+     *
+     * @note Precondition : idx should be a valid output index
+     *
+     * @param[in] idx Index of the node output
+     *
+     * @return TensorID of the requested output
+     */
+    TensorID output_id(size_t idx) const;
+    /** Returns the tensor of a given input of the node
+     *
+     * @note Precondition : idx should be a valid input index
+     *
+     * @param[in] idx Index of the node input
+     *
+     * @return Tensor of the requested input
+     */
+    Tensor *input(size_t idx) const;
+    /** Returns the tensor of a given output of the node
+     *
+     * @note Precondition : idx should be a valid output index
+     *
+     * @param[in] idx Index of the node output
+     *
+     * @return Tensor of the requested output
+     */
+    Tensor *output(size_t idx) const;
+    /** Returns the edge ID of a given input of the node
+     *
+     * @note Precondition : idx should be a valid input index
+     *
+     * @param[in] idx Index of the node input
+     *
+     * @return EdgeID of the requested input
+     */
+    EdgeID input_edge_id(size_t idx) const;
+    /** Returns the edge of a given input of the node
+     *
+     * @note Precondition : idx should be a valid input index
+     *
+     * @param[in] idx Index of the node input
+     *
+     * @return Edge of the requested input
+     */
+    Edge *input_edge(size_t idx) const;
+    /** Returns number of inputs of the node
+     *
+     * @return Number of inputs
+     */
+    size_t num_inputs() const;
+    /** Returns number of outputs of the node
+     *
+     * @return Number of outputs
+     */
+    size_t num_outputs() const;
+    /** Returns requested target for this node
+     *
+     * @return Requested execution target
+     */
+    Target requested_target() const;
+    /** Returns assigned target for this node
+     *
+     * @return Assigned target of this node
+     */
+    Target assigned_target() const;
+
+protected:
+    friend class Graph;
+
+protected:
+    Graph                *_graph;           /**< Backward reference to graph owning the node */
+    NodeID                _id;              /**< Node ID */
+    NodeParams            _common_params;   /**< Node common params */
+    std::vector<TensorID> _outputs;         /**< Output of the node */
+    std::vector<EdgeID>   _input_edges;     /**< Inputs edge set */
+    std::set<EdgeID>      _output_edges;    /**< Output edge set */
+    Target                _assigned_target; /**< Assigned target by the Graph executor */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_INODE_H__ */
diff --git a/arm_compute/graph2/INodeVisitor.h b/arm_compute/graph2/INodeVisitor.h
new file mode 100644
index 0000000..429a258
--- /dev/null
+++ b/arm_compute/graph2/INodeVisitor.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_INODEVISITOR_H__
+#define __ARM_COMPUTE_GRAPH2_INODEVISITOR_H__
+
+#include "arm_compute/graph2/nodes/NodesFwd.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+/**  Node visitor interface */
+class INodeVisitor
+{
+public:
+    virtual ~INodeVisitor()                              = default;
+    virtual void visit(INode &n)                         = 0;
+    virtual void visit(ActivationLayerNode &n)           = 0;
+    virtual void visit(BatchNormalizationLayerNode &n)   = 0;
+    virtual void visit(ConstNode &n)                     = 0;
+    virtual void visit(ConvolutionLayerNode &n)          = 0;
+    virtual void visit(DepthConcatenateLayerNode &n)     = 0;
+    virtual void visit(DepthwiseConvolutionLayerNode &n) = 0;
+    virtual void visit(EltwiseLayerNode &n)              = 0;
+    virtual void visit(FlattenLayerNode &n)              = 0;
+    virtual void visit(FullyConnectedLayerNode &n)       = 0;
+    virtual void visit(InputNode &n)                     = 0;
+    virtual void visit(NormalizationLayerNode &n)        = 0;
+    virtual void visit(OutputNode &n)                    = 0;
+    virtual void visit(PoolingLayerNode &n)              = 0;
+    virtual void visit(ReshapeLayerNode &n)              = 0;
+    virtual void visit(SoftmaxLayerNode &n)              = 0;
+};
+
+/** Default visitor implementation
+ *
+ * Implements visit methods by calling a default function.
+ * Inherit from DefaultNodeVisitor if you don't want to provide specific implementation for all nodes.
+ */
+class DefaultNodeVisitor : public INodeVisitor
+{
+public:
+    virtual ~DefaultNodeVisitor() = default;
+
+    // Inherited methods overridden
+    virtual void visit(INode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(ActivationLayerNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(BatchNormalizationLayerNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(ConstNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(ConvolutionLayerNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(DepthConcatenateLayerNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(DepthwiseConvolutionLayerNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(EltwiseLayerNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(FlattenLayerNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(FullyConnectedLayerNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(InputNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(NormalizationLayerNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(OutputNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(PoolingLayerNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(ReshapeLayerNode &n) override
+    {
+        default_visit();
+    }
+    virtual void visit(SoftmaxLayerNode &n) override
+    {
+        default_visit();
+    }
+
+    /** Function to be overloaded by the client and implement default behavior for the
+     *  non-overloaded visitors
+     */
+    virtual void default_visit() = 0;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_INODEVISITOR_H__ */
diff --git a/arm_compute/graph2/ITensorAccessor.h b/arm_compute/graph2/ITensorAccessor.h
new file mode 100644
index 0000000..1a72773
--- /dev/null
+++ b/arm_compute/graph2/ITensorAccessor.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_ITENSOR_ACCESSOR_H__
+#define __ARM_COMPUTE_GRAPH2_ITENSOR_ACCESSOR_H__
+
+#include "arm_compute/graph/ITensorAccessor.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward Declarations
+class ITensorHandle;
+using ITensorAccessor     = graph::ITensorAccessor;
+using ITensorAccessorUPtr = std::unique_ptr<ITensorAccessor>;
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_ITENSOR_ACCESSOR_H__ */
\ No newline at end of file
diff --git a/arm_compute/graph2/ITensorHandle.h b/arm_compute/graph2/ITensorHandle.h
new file mode 100644
index 0000000..68f79d8
--- /dev/null
+++ b/arm_compute/graph2/ITensorHandle.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_ITENSORHANDLE_H__
+#define __ARM_COMPUTE_GRAPH2_ITENSORHANDLE_H__
+
+#include "arm_compute/core/ITensor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+/** Tensor handle interface object **/
+class ITensorHandle
+{
+public:
+    /** Default virtual destructor **/
+    virtual ~ITensorHandle() = default;
+    /** Allocates backend memory for the handle **/
+    virtual void allocate() = 0;
+    /** Backend tensor object accessor **/
+    virtual arm_compute::ITensor &tensor() = 0;
+    /** Backend tensor object const accessor **/
+    virtual const arm_compute::ITensor &tensor() const = 0;
+    /** Maps backend tensor object
+     *
+     * @param[in] blocking Flags if the mapping operations should be blocking
+     */
+    virtual void map(bool blocking) = 0;
+    /** Un-maps a backend tensor object **/
+    virtual void unmap() = 0;
+    /** Checks if a backing tensor is a sub-tensor object or not
+     *
+     * @return True if the backend tensor is a sub-tensor else false
+     */
+    virtual bool is_subtensor() const = 0;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_ITENSORHANDLE_H__ */
diff --git a/arm_compute/graph2/Logger.h b/arm_compute/graph2/Logger.h
new file mode 100644
index 0000000..f3c263c
--- /dev/null
+++ b/arm_compute/graph2/Logger.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_LOGGER_H__
+#define __ARM_COMPUTE_GRAPH2_LOGGER_H__
+
+#include "arm_compute/core/utils/logging/Macros.h"
+
+/** Create a default core logger
+ *
+ * @note It will eventually create all default loggers if they don't exist
+ */
+#define ARM_COMPUTE_CREATE_DEFAULT_GRAPH_LOGGER()                                  \
+    do                                                                             \
+    {                                                                              \
+        if(arm_compute::logging::LoggerRegistry::get().logger("GRAPH") == nullptr) \
+        {                                                                          \
+            arm_compute::logging::LoggerRegistry::get().create_reserved_loggers(); \
+        }                                                                          \
+    } while(false)
+
+#define ARM_COMPUTE_LOG_GRAPH(log_level, x)    \
+    ARM_COMPUTE_CREATE_DEFAULT_GRAPH_LOGGER(); \
+    ARM_COMPUTE_LOG_STREAM("GRAPH", log_level, x)
+
+#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)       \
+    ARM_COMPUTE_CREATE_DEFAULT_GRAPH_LOGGER(); \
+    ARM_COMPUTE_LOG_STREAM("GRAPH", arm_compute::logging::LogLevel::VERBOSE, x)
+
+#define ARM_COMPUTE_LOG_GRAPH_INFO(x)          \
+    ARM_COMPUTE_CREATE_DEFAULT_GRAPH_LOGGER(); \
+    ARM_COMPUTE_LOG_STREAM("GRAPH", arm_compute::logging::LogLevel::INFO, x)
+
+#define ARM_COMPUTE_LOG_GRAPH_WARNING(x)       \
+    ARM_COMPUTE_CREATE_DEFAULT_GRAPH_LOGGER(); \
+    ARM_COMPUTE_LOG_STREAM("GRAPH", arm_compute::logging::LogLevel::WARN, x)
+
+#define ARM_COMPUTE_LOG_GRAPH_ERROR(x)         \
+    ARM_COMPUTE_CREATE_DEFAULT_GRAPH_LOGGER(); \
+    ARM_COMPUTE_LOG_STREAM("GRAPH", arm_compute::logging::LogLevel::ERROR, x)
+
+#endif /* __ARM_COMPUTE_GRAPH2_LOGGER_H__ */
diff --git a/arm_compute/graph2/PassManager.h b/arm_compute/graph2/PassManager.h
new file mode 100644
index 0000000..b1db18b
--- /dev/null
+++ b/arm_compute/graph2/PassManager.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_PASSMANAGER_H__
+#define __ARM_COMPUTE_GRAPH2_PASSMANAGER_H__
+
+#include "arm_compute/graph2/IGraphMutator.h"
+
+#include <memory>
+#include <vector>
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class Graph;
+
+/** Pass manager
+ *
+ * Responsible for performing the mutating graph passes with a given order
+ **/
+class PassManager final
+{
+public:
+    /** Constructor */
+    PassManager();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    PassManager(const PassManager &) = delete;
+    /** Default move constructor */
+    PassManager(PassManager &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    PassManager &operator=(const PassManager &) = delete;
+    /** Default move assignment operator */
+    PassManager &operator=(PassManager &&) = default;
+    /** Mutation passes accessors
+     *
+     * @return Returns the vector with the mutation passes that are to be executed on a graph
+     */
+    const std::vector<std::unique_ptr<IGraphMutator>> &passes() const;
+    /** Accessor of a pass at a given index
+     *
+     * @param[in] index Index of the requested pass
+     *
+     * @return A pointer to the given pass if exists else nullptr
+     */
+    IGraphMutator *pass(size_t index);
+    /** Appends a mutation pass
+     *
+     * @param[in] pass Pass to append
+     */
+    void append(std::unique_ptr<IGraphMutator> pass);
+    /** Clears all the passes */
+    void clear();
+    /** Runs all the mutation passes on a given graph
+     *
+     * @param[in] g Graph to run the mutations on
+     */
+    void run_all(Graph &g);
+    /** Runs a specific mutation pass on a given graph
+     *
+     * @param[in] g     Graph to run the mutation on
+     * @param[in] index Index of the mutation to execute
+     */
+    void run(Graph &g, size_t index);
+
+private:
+    std::vector<std::unique_ptr<IGraphMutator>> _passes; /**< Vector of graph passes */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_PASSMANAGER_H__ */
diff --git a/arm_compute/graph2/Tensor.h b/arm_compute/graph2/Tensor.h
new file mode 100644
index 0000000..fcba854
--- /dev/null
+++ b/arm_compute/graph2/Tensor.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_TENSOR_H__
+#define __ARM_COMPUTE_GRAPH2_TENSOR_H__
+
+#include "arm_compute/graph2/Types.h"
+
+#include "arm_compute/graph2/ITensorAccessor.h"
+#include "arm_compute/graph2/ITensorHandle.h"
+#include "arm_compute/graph2/TensorDescriptor.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace graph2
+{
+/** Tensor object **/
+class Tensor final
+{
+public:
+    /** Default constructor
+     *
+     * @param[in] id   Tensor ID
+     * @param[in] desc Tensor information
+     */
+    Tensor(TensorID id, TensorDescriptor desc);
+    /** Tensor ID accessor
+     *
+     * @return Tensor ID
+     */
+    TensorID id() const;
+    /** TensorInfo metadata accessor
+     *
+     * @return Tensor descriptor metadata
+     */
+    TensorDescriptor &desc();
+    /** TensorInfo metadata accessor
+     *
+     * @return Tensor descriptor metadata
+     */
+    const TensorDescriptor &desc() const;
+    /** Sets the backend tensor
+     *
+     * @param[in] backend_tensor Backend tensor to set
+     */
+    void set_handle(std::unique_ptr<ITensorHandle> backend_tensor);
+    /** Backend tensor handle accessor
+     *
+     * @return Backend tensor handle
+     */
+    ITensorHandle *handle();
+    /** Sets the backend tensor accessor
+     *
+     * @param[in] accessor Accessor to set
+     */
+    void set_accessor(std::unique_ptr<ITensorAccessor> accessor);
+    /** Backend tensor accessor
+     *
+     * @return Backend tensor accessor
+     */
+    ITensorAccessor *accessor();
+    /** Calls accessor on tensor
+     *
+     * @return True if the accessor was called else false
+     */
+    bool call_accessor();
+    /** Binds the tensor with an edge
+     *
+     * @param[in] eid Edge ID that is bound to the tensor
+     */
+    void bind_edge(EdgeID eid);
+    /** Unbinds an edge from a tensor
+     *
+     * @param[in] eid Edge to unbind
+     */
+    void unbind_edge(EdgeID eid);
+    /** Accessor the edges that are bound with the tensor
+     *
+     * @return Bound edges
+     */
+    const std::set<EdgeID> bound_edges() const;
+
+private:
+    TensorID                         _id;          /**< Tensor id */
+    TensorDescriptor                 _desc;        /**< Tensor metadata */
+    std::unique_ptr<ITensorHandle>   _handle;      /**< Tensor Handle */
+    std::unique_ptr<ITensorAccessor> _accessor;    /**< Tensor Accessor */
+    std::set<EdgeID>                 _bound_edges; /**< Edges bound to this tensor */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_TENSOR_H__ */
diff --git a/arm_compute/graph2/TensorDescriptor.h b/arm_compute/graph2/TensorDescriptor.h
new file mode 100644
index 0000000..ff23f71
--- /dev/null
+++ b/arm_compute/graph2/TensorDescriptor.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_TENSOR_DESCRIPTOR_H__
+#define __ARM_COMPUTE_GRAPH2_TENSOR_DESCRIPTOR_H__
+
+#include "arm_compute/graph2/Types.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+/** Tensor metadata class */
+struct TensorDescriptor final
+{
+    /** Default Constructor */
+    TensorDescriptor() = default;
+    /** Constructor
+     *
+     * @param[in] tensor_shape     Tensor shape
+     * @param[in] tensor_data_type Tensor data type
+     * @param[in] tensor_target    Target to allocate the tensor for
+     */
+    TensorDescriptor(TensorShape tensor_shape, DataType tensor_data_type, Target tensor_target = Target::UNSPECIFIED)
+        : shape(tensor_shape), data_type(tensor_data_type), target(tensor_target)
+    {
+    }
+
+    TensorShape shape{};                        /**< Tensor shape */
+    DataType    data_type{ DataType::UNKNOWN }; /**< Tensor data type */
+    Target      target{ Target::UNSPECIFIED }; /**< Target to allocate the tensor for */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_TENSOR_DESCRIPTOR_H__ */
diff --git a/arm_compute/graph2/TypePrinter.h b/arm_compute/graph2/TypePrinter.h
new file mode 100644
index 0000000..de675c4
--- /dev/null
+++ b/arm_compute/graph2/TypePrinter.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_TYPE_PRINTER_H__
+#define __ARM_COMPUTE_GRAPH2_TYPE_PRINTER_H__
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/graph2/Types.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+/** Formatted output of the Dimensions type, printed as d0xd1x... from lowest to highest dimension. */
+template <typename T>
+inline ::std::ostream &operator<<(::std::ostream &os, const arm_compute::Dimensions<T> &dimensions)
+{
+    if(dimensions.num_dimensions() > 0)
+    {
+        os << dimensions[0];
+
+        for(unsigned int d = 1; d < dimensions.num_dimensions(); ++d)
+        {
+            os << "x" << dimensions[d];
+        }
+    }
+
+    return os;
+}
+
+/** Formatted output of the Size2D type, printed as WIDTHxHEIGHT. */
+inline ::std::ostream &operator<<(::std::ostream &os, const Size2D &size)
+{
+    os << size.width << "x" << size.height;
+
+    return os;
+}
+
+/** Formatted output of the DataType type. Raises a fatal error for unsupported values. */
+inline ::std::ostream &operator<<(::std::ostream &os, const DataType &data_type)
+{
+    switch(data_type)
+    {
+        case DataType::UNKNOWN:
+            os << "UNKNOWN";
+            break;
+        case DataType::U8:
+            os << "U8";
+            break;
+        case DataType::QS8:
+            os << "QS8";
+            break;
+        case DataType::QASYMM8:
+            os << "QASYMM8";
+            break;
+        case DataType::S8:
+            os << "S8";
+            break;
+        case DataType::U16:
+            os << "U16";
+            break;
+        case DataType::S16:
+            os << "S16";
+            break;
+        case DataType::QS16:
+            os << "QS16";
+            break;
+        case DataType::U32:
+            os << "U32";
+            break;
+        case DataType::S32:
+            os << "S32";
+            break;
+        case DataType::U64:
+            os << "U64";
+            break;
+        case DataType::S64:
+            os << "S64";
+            break;
+        case DataType::F16:
+            os << "F16";
+            break;
+        case DataType::F32:
+            os << "F32";
+            break;
+        case DataType::F64:
+            os << "F64";
+            break;
+        case DataType::SIZET:
+            os << "SIZET";
+            break;
+        default:
+            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+    }
+
+    return os;
+}
+
+/** Formatted output of the Target type. Raises a fatal error for unsupported values. */
+inline ::std::ostream &operator<<(::std::ostream &os, const Target &target)
+{
+    switch(target)
+    {
+        case Target::UNSPECIFIED:
+            os << "UNSPECIFIED";
+            break;
+        case Target::NEON:
+            os << "NEON";
+            break;
+        case Target::CL:
+            os << "CL";
+            break;
+        default:
+            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+    }
+
+    return os;
+}
+
+/** Formatted output of the activation function type. Raises a fatal error for unsupported values. */
+inline ::std::ostream &operator<<(::std::ostream &os, const ActivationLayerInfo::ActivationFunction &act_function)
+{
+    switch(act_function)
+    {
+        case ActivationLayerInfo::ActivationFunction::ABS:
+            os << "ABS";
+            break;
+        case ActivationLayerInfo::ActivationFunction::LINEAR:
+            os << "LINEAR";
+            break;
+        case ActivationLayerInfo::ActivationFunction::LOGISTIC:
+            os << "LOGISTIC";
+            break;
+        case ActivationLayerInfo::ActivationFunction::RELU:
+            os << "RELU";
+            break;
+        case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
+            os << "BOUNDED_RELU";
+            break;
+        case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
+            os << "LEAKY_RELU";
+            break;
+        case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
+            os << "SOFT_RELU";
+            break;
+        case ActivationLayerInfo::ActivationFunction::SQRT:
+            os << "SQRT";
+            break;
+        case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
+            os << "LU_BOUNDED_RELU";
+            break;
+        case ActivationLayerInfo::ActivationFunction::SQUARE:
+            os << "SQUARE";
+            break;
+        case ActivationLayerInfo::ActivationFunction::TANH:
+            os << "TANH";
+            break;
+        default:
+            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+    }
+
+    return os;
+}
+
+/** Returns the string representation of an activation function type. */ inline std::string to_string(const ActivationLayerInfo::ActivationFunction &act_function)
+{
+    std::stringstream str; // NOTE(review): relies on a transitive include of <sstream>; consider including it explicitly
+    str << act_function;
+    return str.str();
+}
+
+/** Formatted output of the PoolingType type. Raises a fatal error for unsupported values. */
+inline ::std::ostream &operator<<(::std::ostream &os, const PoolingType &pool_type)
+{
+    switch(pool_type)
+    {
+        case PoolingType::AVG:
+            os << "AVG";
+            break;
+        case PoolingType::MAX:
+            os << "MAX";
+            break;
+        case PoolingType::L2:
+            os << "L2";
+            break;
+        default:
+            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+    }
+
+    return os;
+}
+
+/** Formatted output of the NormType type. Raises a fatal error for unsupported values. */
+inline ::std::ostream &operator<<(::std::ostream &os, const NormType &norm_type)
+{
+    switch(norm_type)
+    {
+        case NormType::CROSS_MAP:
+            os << "CROSS_MAP";
+            break;
+        case NormType::IN_MAP_1D:
+            os << "IN_MAP_1D";
+            break;
+        case NormType::IN_MAP_2D:
+            os << "IN_MAP_2D";
+            break;
+        default:
+            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+    }
+
+    return os;
+}
+
+/** Formatted output of the EltwiseOperation type. Raises a fatal error for unsupported values. */
+inline ::std::ostream &operator<<(::std::ostream &os, const EltwiseOperation &eltwise_op)
+{
+    switch(eltwise_op)
+    {
+        case EltwiseOperation::ADD:
+            os << "ADD";
+            break;
+        case EltwiseOperation::MUL:
+            os << "MUL";
+            break;
+        case EltwiseOperation::SUB:
+            os << "SUB";
+            break;
+        default:
+            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+    }
+
+    return os;
+}
+
+/** Formatted output of the ConvolutionMethod type. Raises a fatal error for unsupported values. */
+inline ::std::ostream &operator<<(::std::ostream &os, const ConvolutionMethod &method)
+{
+    switch(method)
+    {
+        case ConvolutionMethod::DEFAULT:
+            os << "DEFAULT";
+            break;
+        case ConvolutionMethod::DIRECT:
+            os << "DIRECT";
+            break;
+        case ConvolutionMethod::GEMM:
+            os << "GEMM";
+            break;
+        case ConvolutionMethod::WINOGRAD:
+            os << "WINOGRAD";
+            break;
+        default:
+            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+    }
+
+    return os;
+}
+
+/** Formatted output of the DepthwiseConvolutionMethod type. Raises a fatal error for unsupported values. */
+inline ::std::ostream &operator<<(::std::ostream &os, const DepthwiseConvolutionMethod &method)
+{
+    switch(method)
+    {
+        case DepthwiseConvolutionMethod::DEFAULT:
+            os << "DEFAULT";
+            break;
+        case DepthwiseConvolutionMethod::GEMV:
+            os << "GEMV";
+            break;
+        case DepthwiseConvolutionMethod::OPTIMIZED_3x3:
+            os << "OPTIMIZED_3x3";
+            break;
+        default:
+            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+    }
+
+    return os;
+}
+
+/** Formatted output of the PadStrideInfo type, printed as "stride_x,stride_y;pad_left,pad_right,pad_top,pad_bottom". */
+inline ::std::ostream &operator<<(::std::ostream &os, const PadStrideInfo &pad_stride_info)
+{
+    os << pad_stride_info.stride().first << "," << pad_stride_info.stride().second;
+    os << ";";
+    os << pad_stride_info.pad_left() << "," << pad_stride_info.pad_right() << ","
+       << pad_stride_info.pad_top() << "," << pad_stride_info.pad_bottom();
+
+    return os;
+}
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_TYPE_PRINTER_H__ */
diff --git a/arm_compute/graph2/Types.h b/arm_compute/graph2/Types.h
new file mode 100644
index 0000000..05c15f4
--- /dev/null
+++ b/arm_compute/graph2/Types.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_TYPES_H__
+#define __ARM_COMPUTE_GRAPH2_TYPES_H__
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/strong_type/StrongType.h"
+#include "arm_compute/core/utils/strong_type/StrongTypeAttributes.h"
+
+#include <limits>
+#include <string>
+
+namespace arm_compute
+{
+namespace graph2
+{
+using arm_compute::Status;
+
+using arm_compute::DataType;
+using arm_compute::TensorShape;
+using arm_compute::Size2D;
+
+using arm_compute::ActivationLayerInfo;
+using arm_compute::NormType;
+using arm_compute::NormalizationLayerInfo;
+using arm_compute::PadStrideInfo;
+using arm_compute::PoolingLayerInfo;
+using arm_compute::PoolingType;
+using arm_compute::DimensionRoundingType;
+
+/** TODO (geopin01): Make ids strongly typed */
+using TensorID   = unsigned int;
+using NodeID     = unsigned int;
+using EdgeID     = unsigned int;
+using Activation = arm_compute::ActivationLayerInfo::ActivationFunction;
+
+/** GraphID strong type */
+using GraphID = strong_type::StrongType<unsigned int, struct graph_id_t, strong_type::Comparable>;
+/* TODO (geopin01): Strong types for NodeID */
+
+/** Constant TensorID specifying an equivalent of null tensor */
+constexpr TensorID NullTensorID = std::numeric_limits<TensorID>::max();
+/** Constant NodeID specifying an equivalent of null node */
+constexpr NodeID EmptyNodeID = std::numeric_limits<NodeID>::max();
+/** Constant EdgeID specifying an equivalent of null edge */
+constexpr EdgeID EmptyEdgeID = std::numeric_limits<EdgeID>::max();
+
+// Forward declarations
+struct TensorDescriptor; // struct (not class) to match its definition and avoid mismatched-tag warnings
+
+/** Data layout format */
+enum class DataLayout
+{
+    NCHW, /**< N(Batches), C(Channels), H(Height), W(Width) from slow to fast moving dimension */
+    NHWC  /**< N(Batches), H(Height), W(Width), C(Channels) from slow to fast moving dimension */
+};
+
+/** Device target types */
+enum class Target
+{
+    UNSPECIFIED, /**< Unspecified Target */
+    NEON,        /**< NEON capable target device */
+    CL,          /**< OpenCL capable target device */
+};
+
+/** Supported Element-wise operations */
+enum class EltwiseOperation
+{
+    ADD, /**< Arithmetic addition */
+    SUB, /**< Arithmetic subtraction */
+    MUL  /**< Arithmetic multiplication */
+};
+
+/** Supported Convolution layer methods */
+enum class ConvolutionMethod
+{
+    DEFAULT, /**< Default approach using internal heuristics */
+    GEMM,    /**< GEMM based convolution */
+    DIRECT,  /**< Deep direct convolution */
+    WINOGRAD /**< Winograd based convolution */
+};
+
+/** Supported Depthwise Convolution layer methods */
+enum class DepthwiseConvolutionMethod
+{
+    DEFAULT,       /**< Default approach using internal heuristics */
+    GEMV,          /**< Generic GEMV based depthwise convolution */
+    OPTIMIZED_3x3, /**< Optimized 3x3 direct depthwise convolution */
+};
+
+/** Supported nodes */
+enum class NodeType
+{
+    ActivationLayer,
+    BatchNormalizationLayer,
+    ConvolutionLayer,
+    DepthConcatenateLayer,
+    DepthwiseConvolutionLayer,
+    EltwiseLayer,
+    FlattenLayer,
+    FullyConnectedLayer,
+    NormalizationLayer,
+    PoolingLayer,
+    ReshapeLayer,
+    SoftmaxLayer,
+
+    Input,
+    Output,
+    Const,
+};
+
+/** Backend Memory Manager affinity */
+enum class MemoryManagerAffinity
+{
+    Buffer, /**< Affinity at buffer level */
+    Offset  /**< Affinity at offset level */
+};
+
+/** NodeID-index struct
+ *
+ * Used to describe connections
+ */
+struct NodeIdxPair
+{
+    NodeID node_id; /**< Node ID */
+    size_t index;   /**< Index */
+};
+
+/** Common node parameters */
+struct NodeParams
+{
+    std::string name;   /**< Node name */
+    Target      target; /**< Node target */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_TYPES_H__ */
diff --git a/arm_compute/graph2/Utils.h b/arm_compute/graph2/Utils.h
new file mode 100644
index 0000000..750665a
--- /dev/null
+++ b/arm_compute/graph2/Utils.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_UTILS_H__
+#define __ARM_COMPUTE_GRAPH2_UTILS_H__
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/PassManager.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward Declaration
+class GraphContext;
+
+/** Returns the tensor descriptor of a given tensor
+ *
+ * @param[in] g   Graph that the tensor belongs to
+ * @param[in] tid Tensor ID
+ *
+ * @return Tensor descriptor if tensor was found else empty descriptor
+ */
+inline TensorDescriptor get_tensor_descriptor(const Graph &g, TensorID tid)
+{
+    const Tensor *tensor = g.tensor(tid);
+    return (tensor != nullptr) ? tensor->desc() : TensorDescriptor();
+}
+/** Sets an accessor on a given tensor
+ *
+ * @param[in] tensor   Tensor to set the accessor to
+ * @param[in] accessor Accessor to set
+ *
+ * @return OK Status if the accessor was set, error Status if @p tensor is nullptr
+ */
+inline Status set_tensor_accessor(Tensor *tensor, std::unique_ptr<ITensorAccessor> accessor)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON(tensor == nullptr);
+    tensor->set_accessor(std::move(accessor));
+
+    return Status{};
+}
+/** Checks if a specific target is supported
+ *
+ * @param[in] target Target to check
+ *
+ * @return True if target is supported else false
+ */
+bool is_target_supported(Target target);
+/** Returns default target for execution
+ *
+ * @note If an OpenCL backend exists then OpenCL is returned,
+ *       else if the NEON backend exists returns NEON as target.
+ *       If no backends are registered an error is raised.
+ *
+ * @return Default target
+ */
+Target get_default_target();
+/** Forces a single target to all graph constructs
+ *
+ * @param[in] g      Graph to force target on
+ * @param[in] target Target to force
+ */
+void force_target_to_graph(Graph &g, Target target);
+/** Creates a default @ref PassManager
+ *
+ * @return A PassManager with default mutating passes
+ */
+PassManager create_default_pass_manager();
+/** Sets up the graph context with default values if not done manually
+ *
+ * @param[in] ctx Graph Context
+ */
+void setup_default_graph_context(GraphContext &ctx);
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_UTILS_H__ */
diff --git a/arm_compute/graph2/Workload.h b/arm_compute/graph2/Workload.h
new file mode 100644
index 0000000..85506e3
--- /dev/null
+++ b/arm_compute/graph2/Workload.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_WORKLOAD_H__
+#define __ARM_COMPUTE_GRAPH2_WORKLOAD_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include <memory>
+#include <vector>
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class ITensorHandle;
+class INode;
+class Tensor;
+
+/** Execution task
+ *
+ * Contains all the information required to execute a given task
+ */
+struct ExecutionTask
+{
+    // TODO (geopin01) : Support vector of functions?
+    std::unique_ptr<arm_compute::IFunction> task            = {}; /**< Task to execute */
+    INode                                  *node            = {}; /**< Node bound to this workload */
+    std::vector<ITensorHandle *>            commit_handles  = {}; /**< Handles that need to be synced for this task to execute */
+    std::vector<ITensorHandle *>            release_handles = {}; /**< Handles that can be released after this node execution */
+
+    /** Function operator that executes the task */
+    void operator()();
+};
+
+/** Execution workload */
+struct ExecutionWorkload
+{
+    std::vector<Tensor *>      inputs  = {}; /**< Input tensors */
+    std::vector<Tensor *>      outputs = {}; /**< Output tensors */
+    std::vector<ExecutionTask> tasks   = {}; /**< Execution tasks */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_WORKLOAD_H__ */
diff --git a/arm_compute/graph2/algorithms/Algorithms.h b/arm_compute/graph2/algorithms/Algorithms.h
new file mode 100644
index 0000000..dfc36fe
--- /dev/null
+++ b/arm_compute/graph2/algorithms/Algorithms.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_ALGORITHMS_H__
+#define __ARM_COMPUTE_GRAPH2_ALGORITHMS_H__
+
+#include "arm_compute/graph2/algorithms/BFS.h"
+
+#endif /* __ARM_COMPUTE_GRAPH2_ALGORITHMS_H__ */
diff --git a/arm_compute/graph2/algorithms/BFS.h b/arm_compute/graph2/algorithms/BFS.h
new file mode 100644
index 0000000..d590658
--- /dev/null
+++ b/arm_compute/graph2/algorithms/BFS.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_ALGORITHM_BFS_H__
+#define __ARM_COMPUTE_GRAPH2_ALGORITHM_BFS_H__
+
+#include "arm_compute/graph2/Graph.h"
+
+#include <list>
+#include <vector>
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace detail
+{
+/** Checks if all the input dependencies of a node have been visited
+ *
+ * @param[in] node    Node to check
+ * @param[in] visited Vector that contains the visited information
+ *
+ * @return True if all input dependencies have been visited else false
+ */
+inline bool all_inputs_are_visited(const INode *node, const std::vector<bool> &visited)
+{
+    ARM_COMPUTE_ERROR_ON(node == nullptr);
+    const Graph *graph = node->graph();
+    ARM_COMPUTE_ERROR_ON(graph == nullptr);
+
+    bool are_all_visited = true;
+    for(const auto &input_edge_id : node->input_edges())
+    {
+        if(input_edge_id != EmptyEdgeID) // compare against the edge sentinel (was EmptyNodeID); null edges carry no dependency
+        {
+            const Edge *input_edge = graph->edge(input_edge_id);
+            ARM_COMPUTE_ERROR_ON(input_edge == nullptr);
+            ARM_COMPUTE_ERROR_ON(input_edge->producer() == nullptr);
+            if(!visited[input_edge->producer_id()])
+            {
+                are_all_visited = false;
+                break;
+            }
+        }
+    }
+
+    return are_all_visited;
+}
+} // namespace detail
+
+/** Breadth first search traversal
+ *
+ * @param[in] g Graph to traverse
+ *
+ * @return A vector with the node id traversal order
+ */
+inline std::vector<NodeID> bfs(Graph &g)
+{
+    std::vector<NodeID> bfs_order_vector;
+
+    // Create vector that tracks whether each node has been visited
+    std::vector<bool> visited(g.nodes().size(), false);
+
+    // Create BFS queue
+    std::list<NodeID> queue;
+
+    // Push inputs and mark as visited
+    for(auto &input : g.inputs())
+    {
+        if(input != EmptyNodeID)
+        {
+            visited[input] = true;
+            queue.push_back(input);
+        }
+    }
+
+    // Iterate over the queue; enqueue a consumer only once all of its input dependencies have been visited
+    while(!queue.empty())
+    {
+        // Dequeue a node from queue and process
+        NodeID n = queue.front();
+        bfs_order_vector.push_back(n);
+        queue.pop_front();
+
+        const INode *node = g.node(n);
+        ARM_COMPUTE_ERROR_ON(node == nullptr);
+        for(const auto &eid : node->output_edges())
+        {
+            const Edge *e = g.edge(eid);
+            ARM_COMPUTE_ERROR_ON(e == nullptr);
+            if(!visited[e->consumer_id()] && detail::all_inputs_are_visited(e->consumer(), visited))
+            {
+                visited[e->consumer_id()] = true;
+                queue.push_back(e->consumer_id());
+            }
+        }
+    }
+
+    return bfs_order_vector;
+}
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_ALGORITHM_BFS_H__ */
diff --git a/arm_compute/graph2/backends/BackendRegistrar.h b/arm_compute/graph2/backends/BackendRegistrar.h
new file mode 100644
index 0000000..f9905a7
--- /dev/null
+++ b/arm_compute/graph2/backends/BackendRegistrar.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_GRAPH2_BACKEND_REGISTRAR_H__
+#define ARM_COMPUTE_GRAPH2_BACKEND_REGISTRAR_H__
+
+#include "arm_compute/graph2/Types.h"
+#include "arm_compute/graph2/backends/BackendRegistry.h"
+
+#include <utility>
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+namespace detail
+{
+/** Helper class to statically register a backend */
+template <typename T>
+class BackendRegistrar final
+{
+public:
+    /** Add a new backend to the backend registry
+     *
+     * @param[in] target Execution target
+     */
+    BackendRegistrar(Target target); // NOTE(review): consider marking explicit to prevent implicit Target conversion
+};
+
+template <typename T>
+inline BackendRegistrar<T>::BackendRegistrar(Target target)
+{
+    BackendRegistry::get().add_backend<T>(target); // register backend T for @p target in the registry instance
+}
+} // namespace detail
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_GRAPH2_BACKEND_REGISTRAR_H__ */
\ No newline at end of file
diff --git a/arm_compute/graph2/backends/BackendRegistry.h b/arm_compute/graph2/backends/BackendRegistry.h
new file mode 100644
index 0000000..9481115
--- /dev/null
+++ b/arm_compute/graph2/backends/BackendRegistry.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_BACKEND_REGISTRY_H__
+#define __ARM_COMPUTE_GRAPH2_BACKEND_REGISTRY_H__
+
+#include "arm_compute/graph2/IDeviceBackend.h"
+#include "arm_compute/graph2/Types.h"
+#include "support/ToolchainSupport.h"
+
+#include <map>
+#include <memory>
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+/** Registry holding all the supported backends */
+class BackendRegistry final
+{
+public:
+    /** Gets backend registry instance
+     *
+     * @return Backend registry instance
+     */
+    static BackendRegistry &get();
+    /** Finds a backend in the registry
+     *
+     * @param[in] target Backend target
+     *
+     * @return Pointer to the backend interface if found, else nullptr
+     */
+    IDeviceBackend *find_backend(Target target);
+    /** Checks if a backend for a given target exists
+     *
+     * @param[in] target Execution target
+     *
+     * @return True if a backend for the given target exists, else false
+     */
+    bool contains(Target target) const;
+    /** Backends accessor
+     *
+     * @return Map containing the registered backends
+     */
+    const std::map<Target, std::unique_ptr<IDeviceBackend>> &backends() const;
+    /** Registers a backend to the registry
+     *
+     * @param[in] target Execution target to register for
+     */
+    template <typename T>
+    void add_backend(Target target);
+
+private:
+    /** Default Constructor */
+    BackendRegistry();
+
+private:
+    std::map<Target, std::unique_ptr<IDeviceBackend>> _registered_backends;
+};
+
+template <typename T>
+inline void BackendRegistry::add_backend(Target target)
+{
+    _registered_backends[target] = support::cpp14::make_unique<T>();
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_BACKEND_REGISTRY_H__ */
diff --git a/arm_compute/graph2/backends/CL/CLDeviceBackend.h b/arm_compute/graph2/backends/CL/CLDeviceBackend.h
new file mode 100644
index 0000000..c48a85f
--- /dev/null
+++ b/arm_compute/graph2/backends/CL/CLDeviceBackend.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_CLDEVICEBACKEND_H__
+#define __ARM_COMPUTE_GRAPH2_CLDEVICEBACKEND_H__
+
+#include "arm_compute/graph2/IDeviceBackend.h"
+
+#include "arm_compute/runtime/CL/CLBufferAllocator.h"
+#include "arm_compute/runtime/CL/CLTuner.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+/** OpenCL device backend */
+class CLDeviceBackend final : public IDeviceBackend
+{
+public:
+    /** Default Constructor */
+    CLDeviceBackend();
+    /** Destructor */
+    ~CLDeviceBackend();
+    /** Switches kernel tuning on or off
+     *
+     * @note When enabled, the set tuner is used; if no tuner is set a new default one is created
+     *
+     * @param[in] enable_tuning True to enable kernel tuning, false to disable it
+     */
+    void set_kernel_tuning(bool enable_tuning);
+
+    // Inherited overridden methods
+    void initialize_backend() override;
+    void setup_backend_context(GraphContext &ctx) override;
+    std::unique_ptr<ITensorHandle> create_tensor(const Tensor &tensor) override;
+    std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords) override;
+    std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
+    Status validate_node(const INode &node) override;
+    std::shared_ptr<arm_compute::IMemoryManager> create_memory_manager(MemoryManagerAffinity affinity) override;
+
+private:
+    CLTuner           _tuner;     /**< CL kernel tuner */
+    CLBufferAllocator _allocator; /**< CL buffer affinity allocator */
+};
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif //__ARM_COMPUTE_GRAPH2_CLDEVICEBACKEND_H__
diff --git a/arm_compute/graph2/backends/CL/CLFunctionFactory.h b/arm_compute/graph2/backends/CL/CLFunctionFactory.h
new file mode 100644
index 0000000..94fd2b8
--- /dev/null
+++ b/arm_compute/graph2/backends/CL/CLFunctionFactory.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_CLFUNCTIONFACTORY_H__
+#define __ARM_COMPUTE_GRAPH2_CLFUNCTIONFACTORY_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class INode;
+class GraphContext;
+
+namespace backends
+{
+/** Factory for generating OpenCL backend functions **/
+class CLFunctionFactory final
+{
+public:
+    /** Create a backend execution function depending on the node type
+     *
+     * @param[in] node Node to create the backend function for
+     * @param[in] ctx  Context to use
+     *
+     * @return Backend function
+     */
+    static std::unique_ptr<arm_compute::IFunction> create(INode *node, GraphContext &ctx);
+};
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif //__ARM_COMPUTE_GRAPH2_CLFUNCTIONFACTORY_H__
diff --git a/arm_compute/graph2/backends/CL/CLSubTensorHandle.h b/arm_compute/graph2/backends/CL/CLSubTensorHandle.h
new file mode 100644
index 0000000..5584a8b
--- /dev/null
+++ b/arm_compute/graph2/backends/CL/CLSubTensorHandle.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_CLSUBTENSORHANDLE_H__
+#define __ARM_COMPUTE_GRAPH2_CLSUBTENSORHANDLE_H__
+
+#include "arm_compute/graph2/ITensorHandle.h"
+
+#include "arm_compute/runtime/CL/CLSubTensor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+/** OpenCL Sub-Tensor handle interface object **/
+class CLSubTensorHandle final : public ITensorHandle
+{
+public:
+    /** Default constructor
+     *
+     * @param[in] parent_handle Parent tensor handle
+     * @param[in] shape         Sub-Tensor shape
+     * @param[in] coords        Starting coordinates
+     */
+    CLSubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords);
+    /** Destructor: free the tensor's memory */
+    ~CLSubTensorHandle() = default;
+    /** Allow instances of this class to be move constructed */
+    CLSubTensorHandle(CLSubTensorHandle &&) = default;
+    /** Allow instances of this class to be moved */
+    CLSubTensorHandle &operator=(CLSubTensorHandle &&) = default;
+
+    // Inherited overridden methods
+    void                        allocate() override;
+    arm_compute::ITensor       &tensor() override;
+    const arm_compute::ITensor &tensor() const override;
+    void map(bool blocking) override;
+    void unmap() override;
+    bool is_subtensor() const override;
+
+private:
+    arm_compute::CLSubTensor _sub_tensor; /**< Backend Sub-Tensor */
+};
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_CLSUBTENSORHANDLE_H__ */
diff --git a/arm_compute/graph2/backends/CL/CLTensorHandle.h b/arm_compute/graph2/backends/CL/CLTensorHandle.h
new file mode 100644
index 0000000..37d7147
--- /dev/null
+++ b/arm_compute/graph2/backends/CL/CLTensorHandle.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_CLTENSORHANDLE_H__
+#define __ARM_COMPUTE_GRAPH2_CLTENSORHANDLE_H__
+
+#include "arm_compute/graph2/ITensorHandle.h"
+
+#include "arm_compute/runtime/CL/CLTensor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+/** OpenCL Tensor handle interface object **/
+class CLTensorHandle final : public ITensorHandle
+{
+public:
+    /** Default Constructor
+     *
+     * @param[in] info Tensor metadata
+     */
+    CLTensorHandle(const ITensorInfo &info);
+    /** Destructor: free the tensor's memory */
+    ~CLTensorHandle() = default;
+    /** Allow instances of this class to be move constructed */
+    CLTensorHandle(CLTensorHandle &&) = default;
+    /** Allow instances of this class to be moved */
+    CLTensorHandle &operator=(CLTensorHandle &&) = default;
+
+    // Inherited overridden methods
+    void                        allocate() override;
+    arm_compute::ITensor       &tensor() override;
+    const arm_compute::ITensor &tensor() const override;
+    void map(bool blocking) override;
+    void unmap() override;
+    bool is_subtensor() const override;
+
+private:
+    arm_compute::CLTensor _tensor; /**< Backend Tensor */
+};
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_CLTENSORHANDLE_H__ */
diff --git a/arm_compute/graph2/backends/NEON/NEDeviceBackend.h b/arm_compute/graph2/backends/NEON/NEDeviceBackend.h
new file mode 100644
index 0000000..533a2c0
--- /dev/null
+++ b/arm_compute/graph2/backends/NEON/NEDeviceBackend.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_NEDEVICEBACKEND_H__
+#define __ARM_COMPUTE_GRAPH2_NEDEVICEBACKEND_H__
+
+#include "arm_compute/graph2/IDeviceBackend.h"
+
+#include "arm_compute/runtime/Allocator.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+/** NEON device backend */
+class NEDeviceBackend final : public IDeviceBackend
+{
+public:
+    NEDeviceBackend();
+
+    // Inherited overridden methods
+    void initialize_backend() override;
+    void setup_backend_context(GraphContext &ctx) override;
+    std::unique_ptr<ITensorHandle> create_tensor(const Tensor &tensor) override;
+    std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords) override;
+    std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
+    Status validate_node(const INode &node) override;
+    std::shared_ptr<arm_compute::IMemoryManager> create_memory_manager(MemoryManagerAffinity affinity) override;
+
+private:
+    Allocator _allocator; /**< NEON backend allocator */
+};
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif //__ARM_COMPUTE_GRAPH2_NEDEVICEBACKEND_H__
diff --git a/arm_compute/graph2/backends/NEON/NEFunctionFactory.h b/arm_compute/graph2/backends/NEON/NEFunctionFactory.h
new file mode 100644
index 0000000..a065340
--- /dev/null
+++ b/arm_compute/graph2/backends/NEON/NEFunctionFactory.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_NEFUNCTIONFACTORY_H__
+#define __ARM_COMPUTE_GRAPH2_NEFUNCTIONFACTORY_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class INode;
+class GraphContext;
+
+namespace backends
+{
+class NEFunctionFactory final
+{
+public:
+    /** Create a backend execution function depending on the node type
+     *
+     * @param[in] node Node to create the backend function for
+     * @param[in] ctx  Context to use
+     *
+     * @return Backend function
+     */
+    static std::unique_ptr<arm_compute::IFunction> create(INode *node, GraphContext &ctx);
+};
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif //__ARM_COMPUTE_GRAPH2_NEFUNCTIONFACTORY_H__
diff --git a/arm_compute/graph2/backends/NEON/NENodeValidator.h b/arm_compute/graph2/backends/NEON/NENodeValidator.h
new file mode 100644
index 0000000..8e84485
--- /dev/null
+++ b/arm_compute/graph2/backends/NEON/NENodeValidator.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_NENODEVALIDATOR_H__
+#define __ARM_COMPUTE_GRAPH2_NENODEVALIDATOR_H__
+
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+// TODO (geopin01) : Add node validator
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif //__ARM_COMPUTE_GRAPH2_NENODEVALIDATOR_H__
diff --git a/arm_compute/graph2/backends/NEON/NESubTensorHandle.h b/arm_compute/graph2/backends/NEON/NESubTensorHandle.h
new file mode 100644
index 0000000..e027b0c
--- /dev/null
+++ b/arm_compute/graph2/backends/NEON/NESubTensorHandle.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_NESUBTENSORHANDLE_H__
+#define __ARM_COMPUTE_GRAPH2_NESUBTENSORHANDLE_H__
+
+#include "arm_compute/graph2/ITensorHandle.h"
+
+#include "arm_compute/runtime/SubTensor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+/** NEON Sub-Tensor handle interface object **/
+class NESubTensorHandle final : public ITensorHandle
+{
+public:
+    /** Default constructor
+     *
+     * @param[in] parent_handle Parent tensor handle
+     * @param[in] shape         Sub-Tensor shape
+     * @param[in] coords        Starting coordinates
+     */
+    NESubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords);
+    /** Destructor: free the tensor's memory */
+    ~NESubTensorHandle() = default;
+    /** Allow instances of this class to be move constructed */
+    NESubTensorHandle(NESubTensorHandle &&) = default;
+    /** Allow instances of this class to be moved */
+    NESubTensorHandle &operator=(NESubTensorHandle &&) = default;
+
+    // Inherited overridden methods
+    void                        allocate() override;
+    arm_compute::ITensor       &tensor() override;
+    const arm_compute::ITensor &tensor() const override;
+    void map(bool blocking) override;
+    void unmap() override;
+    bool is_subtensor() const override;
+
+private:
+    arm_compute::SubTensor _sub_tensor; /**< Backend Sub-Tensor */
+};
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_NESUBTENSORHANDLE_H__ */
diff --git a/arm_compute/graph2/backends/NEON/NETensorHandle.h b/arm_compute/graph2/backends/NEON/NETensorHandle.h
new file mode 100644
index 0000000..c22fcdf
--- /dev/null
+++ b/arm_compute/graph2/backends/NEON/NETensorHandle.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_NETENSORHANDLE_H__
+#define __ARM_COMPUTE_GRAPH2_NETENSORHANDLE_H__
+
+#include "arm_compute/graph2/ITensorHandle.h"
+
+#include "arm_compute/runtime/Tensor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+/** NEON Tensor handle interface object **/
+class NETensorHandle final : public ITensorHandle
+{
+public:
+    /** Default Constructor
+     *
+     * @param[in] info Tensor metadata
+     */
+    NETensorHandle(const ITensorInfo &info);
+    /** Destructor: free the tensor's memory */
+    ~NETensorHandle() = default;
+    /** Allow instances of this class to be move constructed */
+    NETensorHandle(NETensorHandle &&) = default;
+    /** Allow instances of this class to be moved */
+    NETensorHandle &operator=(NETensorHandle &&) = default;
+
+    // Inherited overridden methods
+    void                        allocate() override;
+    arm_compute::ITensor       &tensor() override;
+    const arm_compute::ITensor &tensor() const override;
+    void map(bool blocking) override;
+    void unmap() override;
+    bool is_subtensor() const override;
+
+private:
+    arm_compute::Tensor _tensor; /**< Backend Tensor */
+};
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_NETENSORHANDLE_H__ */
diff --git a/arm_compute/graph2/backends/Utils.h b/arm_compute/graph2/backends/Utils.h
new file mode 100644
index 0000000..cc6f516
--- /dev/null
+++ b/arm_compute/graph2/backends/Utils.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_BACKENDS_UTILS_H__
+#define __ARM_COMPUTE_GRAPH2_BACKENDS_UTILS_H__
+
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+/** Creates and configures a named function
+ *
+ * @param[in] name Name of the function
+ * @param[in] args Function arguments
+ *
+ * @return  A configured backend function
+ */
+template <typename FunctionType, typename FunctionNameType, typename... ParameterType>
+std::pair<std::unique_ptr<arm_compute::IFunction>, FunctionNameType> create_named_function(FunctionNameType name, ParameterType... args)
+{
+    auto f = arm_compute::support::cpp14::make_unique<FunctionType>();
+    f->configure(std::forward<ParameterType>(args)...);
+    return std::make_pair(std::move(f), name);
+}
+
+/** Creates and configures a named function
+ *
+ * @param[in] name Name of the function
+ * @param[in] mm   Memory manager to use
+ * @param[in] args Function arguments
+ *
+ * @return  A configured backend function
+ */
+template <typename FunctionType, typename FunctionNameType, typename MemoryManagerType, typename... ParameterType>
+std::pair<std::unique_ptr<arm_compute::IFunction>, FunctionNameType> create_named_memory_managed_function(FunctionNameType name,
+                                                                                                          MemoryManagerType mm,
+                                                                                                          ParameterType... args)
+{
+    auto f = arm_compute::support::cpp14::make_unique<FunctionType>(mm);
+    f->configure(std::forward<ParameterType>(args)...);
+    return std::make_pair(std::move(f), name);
+}
+
+/** Checks if an operation is in place
+ *
+ * @param[in] input  Pointer to input
+ * @param[in] output Pointer to output
+ *
+ * @return True if output is nullptr or input is equal to the output, else false
+ */
+inline bool is_in_place_operation(void *input, void *output)
+{
+    return (output == nullptr) || (input == output);
+}
+
+/** Returns the memory manager for a given target
+ *
+ * @param[in] ctx    Graph context containing memory management metadata
+ * @param[in] target Target to retrieve the memory manager from
+ *
+ * @return The memory manager for the given target if enabled, else nullptr
+ */
+inline std::shared_ptr<IMemoryManager> get_memory_manager(GraphContext &ctx, Target target)
+{
+    bool enabled = ctx.is_memory_management_enabled() && (ctx.memory_management_ctx(target) != nullptr);
+    return enabled ? ctx.memory_management_ctx(target)->mm : nullptr;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+
+#endif /* __ARM_COMPUTE_GRAPH2_BACKENDS_UTILS_H__ */
diff --git a/arm_compute/graph2/detail/ExecutionHelpers.h b/arm_compute/graph2/detail/ExecutionHelpers.h
new file mode 100644
index 0000000..acd1654
--- /dev/null
+++ b/arm_compute/graph2/detail/ExecutionHelpers.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_DETAIL_EXECUTION_HELPERS_H__
+#define __ARM_COMPUTE_GRAPH2_DETAIL_EXECUTION_HELPERS_H__
+
+#include "arm_compute/graph2/Types.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class Graph;
+class GraphContext;
+class ExecutionWorkload;
+class Tensor;
+
+namespace detail
+{
+/** Initializes the available backends **/
+void default_initialize_backends();
+/** Configures all tensors of a graph
+ *
+ * @param[in] g Graph to configure
+ */
+void configure_all_tensors(Graph &g);
+/** Allocates all tensors of a graph
+ *
+ * @param[in] g Graph to allocate the tensors
+ */
+void allocate_all_tensors(Graph &g);
+/** Configures all nodes of a graph
+ *
+ * @param[in] g   Graph to configure the nodes
+ * @param[in] ctx Graph context to use
+ *
+ * @return The execution workload
+ */
+ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx);
+/** Calls accessor of a given tensor
+ *
+ * @param[in] tensor The tensor of which the accessor should be called
+ */
+void call_tensor_accessor(Tensor *tensor);
+/** Call all const node accessors
+ *
+ * @param[in] g Graph containing the const nodes
+ */
+void call_all_const_node_accessors(Graph &g);
+/** Call all input node accessors
+ *
+ * @param[in] workload Workload to execute
+ */
+void call_all_input_node_accessors(ExecutionWorkload &workload);
+/** Call all output node accessors
+ *
+ * @param[in] workload Workload to execute
+ */
+void call_all_output_node_accessors(ExecutionWorkload &workload);
+/** Executes all tasks of a workload
+ *
+ * @param[in] workload Workload to execute
+ */
+void call_all_tasks(ExecutionWorkload &workload);
+} // namespace detail
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_DETAIL_EXECUTION_HELPERS_H__ */
diff --git a/arm_compute/graph2/frontend/ILayer.h b/arm_compute/graph2/frontend/ILayer.h
new file mode 100644
index 0000000..fee0b37
--- /dev/null
+++ b/arm_compute/graph2/frontend/ILayer.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_ILAYER_H__
+#define __ARM_COMPUTE_GRAPH2_ILAYER_H__
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace frontend
+{
+// Forward declarations
+class IStream;
+
+/** ILayer interface **/
+class ILayer
+{
+public:
+    virtual ~ILayer()                       = default;
+    virtual NodeID create_layer(IStream &s) = 0;
+};
+} // namespace frontend
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_ILAYER_H__ */
diff --git a/arm_compute/graph2/frontend/IStream.h b/arm_compute/graph2/frontend/IStream.h
new file mode 100644
index 0000000..110be52
--- /dev/null
+++ b/arm_compute/graph2/frontend/IStream.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_ISTREAM_H__
+#define __ARM_COMPUTE_GRAPH2_ISTREAM_H__
+
+#include "arm_compute/graph2/frontend/Types.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class Graph;
+
+namespace frontend
+{
+// Forward declarations
+class ILayer;
+
+/** Stream interface **/
+class IStream
+{
+public:
+    virtual ~IStream() = default;
+    /** Adds a layer to the stream
+     *
+     * @param[in] layer Layer to add
+     */
+    virtual void add_layer(ILayer &layer) = 0;
+    /** Returns the underlying graph
+     *
+     * @return Underlying graph
+     */
+    virtual Graph &graph() = 0;
+    /** Returns the underlying graph
+     *
+     * @return Underlying graph
+     */
+    virtual const Graph &graph() const = 0;
+    /** Returns the tail node of the Stream
+     *
+     * @return Tail Node ID
+     */
+    NodeID tail_node()
+    {
+        return _tail_node;
+    }
+    /** Returns the stream hints that are currently used
+     *
+     * @return Stream hints
+     */
+    StreamHints &hints()
+    {
+        return _hints;
+    }
+    /** Forwards tail of stream to a given nid
+     *
+     * @param[in] nid NodeID of the updated tail node
+     */
+    void forward_tail(NodeID nid)
+    {
+        _tail_node = (nid != NullTensorID) ? nid : _tail_node;
+    }
+
+protected:
+    StreamHints _hints     = {};              /**< Execution and algorithmic hints */
+    NodeID      _tail_node = { EmptyNodeID }; /**< NodeID pointing to the last(tail) node of the graph */
+};
+} // namespace frontend
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_ISTREAM_H__ */
diff --git a/arm_compute/graph2/frontend/IStreamOperators.h b/arm_compute/graph2/frontend/IStreamOperators.h
new file mode 100644
index 0000000..1798e4a
--- /dev/null
+++ b/arm_compute/graph2/frontend/IStreamOperators.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_ISTREAM_OPERATORS_H__
+#define __ARM_COMPUTE_GRAPH2_ISTREAM_OPERATORS_H__
+
+#include "arm_compute/graph2/frontend/IStream.h"
+#include "arm_compute/graph2/frontend/Types.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace frontend
+{
+// Forward declarations
+class ILayer;
+
+/** Overloaded stream operator to add a node to the graph
+ *
+ * @param[in, out] s     Stream to add the layer to
+ * @param[in]      layer Layer to be added
+ *
+ * @return Updated stream
+ */
+inline IStream &operator<<(IStream &s, ILayer &&layer)
+{
+    s.add_layer(layer);
+    return s;
+}
+/** Overloaded stream operator to provide a target hint to the graph
+ *
+ * @param[in, out] s           Stream to provide the hint to
+ * @param[in]      target_hint Target hint to be considered
+ *
+ * @return Updated stream
+ */
+inline IStream &operator<<(IStream &s, Target target_hint)
+{
+    s.hints().target_hint = target_hint;
+    return s;
+}
+/** Overloaded stream operator to provide a convolution method hint to the graph
+ *
+ * @param[in, out] s                       Stream to provide the hint to
+ * @param[in]      convolution_method_hint Convolution method hint to be considered
+ *
+ * @return Updated stream
+ */
+inline IStream &operator<<(IStream &s, ConvolutionMethod convolution_method_hint)
+{
+    s.hints().convolution_method_hint = convolution_method_hint;
+    return s;
+}
+/** Overloaded stream operator to provide a depthwise convolution method hint to the graph
+ *
+ * @param[in, out] s                                 Stream to provide the hint to
+ * @param[in]      depthwise_convolution_method_hint Depthwise Convolution method hint to be considered
+ *
+ * @return Updated stream
+ */
+inline IStream &operator<<(IStream &s, DepthwiseConvolutionMethod depthwise_convolution_method_hint)
+{
+    s.hints().depthwise_convolution_method_hint = depthwise_convolution_method_hint;
+    return s;
+}
+} // namespace frontend
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_ISTREAM_OPERATORS_H__ */
diff --git a/arm_compute/graph2/frontend/Layers.h b/arm_compute/graph2/frontend/Layers.h
new file mode 100644
index 0000000..40274a4
--- /dev/null
+++ b/arm_compute/graph2/frontend/Layers.h
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_LAYERS_H__
+#define __ARM_COMPUTE_GRAPH2_LAYERS_H__
+
+#include "arm_compute/graph2/GraphBuilder.h"
+#include "arm_compute/graph2/Types.h"
+#include "arm_compute/graph2/frontend/ILayer.h"
+#include "arm_compute/graph2/frontend/IStream.h"
+#include "arm_compute/graph2/frontend/SubStream.h"
+
+#include "arm_compute/core/utils/misc/Utility.h"
+
+#include <memory>
+#include <string>
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace frontend
+{
+/** Input Layer */
+class InputLayer final : public ILayer
+{
+public:
+    InputLayer(TensorDescriptor desc, ITensorAccessorUPtr accessor)
+        : _desc(desc), _accessor(std::move(accessor))
+    {
+    }
+
+    NodeID create_layer(IStream &s) override
+    {
+        NodeParams common_params = { "", s.hints().target_hint };
+        return GraphBuilder::add_input_node(s.graph(), common_params, _desc, std::move(_accessor));
+    }
+
+private:
+    TensorDescriptor    _desc;
+    ITensorAccessorUPtr _accessor;
+};
+
+/** Output Layer */
+class OutputLayer final : public ILayer
+{
+public:
+    OutputLayer(ITensorAccessorUPtr accessor)
+        : _accessor(std::move(accessor))
+    {
+    }
+
+    NodeID create_layer(IStream &s) override
+    {
+        NodeParams  common_params = { "", s.hints().target_hint };
+        NodeIdxPair input         = { s.tail_node(), 0 };
+        return GraphBuilder::add_output_node(s.graph(), common_params, input, std::move(_accessor));
+    }
+
+private:
+    ITensorAccessorUPtr _accessor;
+};
+
+/** Activation Layer */
+class ActivationLayer final : public ILayer
+{
+public:
+    ActivationLayer(ActivationLayerInfo act_info)
+        : _act_info(act_info)
+    {
+    }
+
+    NodeID create_layer(IStream &s) override
+    {
+        NodeParams  common_params = { "", s.hints().target_hint };
+        NodeIdxPair input         = { s.tail_node(), 0 };
+        return GraphBuilder::add_activation_node(s.graph(), common_params, input, _act_info);
+    }
+
+private:
+    ActivationLayerInfo _act_info;
+};
+
+/** Batchnormalization Layer */
+class BatchNormalizationLayer final : public ILayer
+{
+public:
+    BatchNormalizationLayer(ITensorAccessorUPtr mean,
+                            ITensorAccessorUPtr var,
+                            ITensorAccessorUPtr gamma   = nullptr,
+                            ITensorAccessorUPtr beta    = nullptr,
+                            float               epsilon = 0.001f)
+        : _mean(std::move(mean)), _var(std::move(var)), _gamma(std::move(gamma)), _beta(std::move(beta)), _epsilon(epsilon)
+    {
+    }
+
+    NodeID create_layer(IStream &s) override
+    {
+        ARM_COMPUTE_ERROR_ON(_mean == nullptr);
+        ARM_COMPUTE_ERROR_ON(_var == nullptr);
+
+        NodeParams  common_params = { "", s.hints().target_hint };
+        NodeIdxPair input         = { s.tail_node(), 0 };
+        return GraphBuilder::add_batch_normalization_node(s.graph(), common_params, input, _epsilon,
+                                                          std::move(_mean), std::move(_var), std::move(_beta), std::move(_gamma));
+    }
+
+private:
+    ITensorAccessorUPtr _mean;
+    ITensorAccessorUPtr _var;
+    ITensorAccessorUPtr _gamma;
+    ITensorAccessorUPtr _beta;
+    float               _epsilon;
+};
+
+/** Convolution Layer */
+class ConvolutionLayer final : public ILayer
+{
+public:
+    ConvolutionLayer(unsigned int        conv_width,
+                     unsigned int        conv_height,
+                     unsigned int        ofm,
+                     ITensorAccessorUPtr weights,
+                     ITensorAccessorUPtr bias,
+                     PadStrideInfo       conv_info,
+                     unsigned int        num_groups = 1)
+        : _conv_width(conv_width),
+          _conv_height(conv_height),
+          _ofm(ofm),
+          _conv_info(std::move(conv_info)),
+          _num_groups(num_groups),
+          _weights(std::move(weights)),
+          _bias(std::move(bias))
+    {
+    }
+
+    NodeID create_layer(IStream &s) override
+    {
+        ARM_COMPUTE_UNUSED(_num_groups);
+        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = { "", s.hints().target_hint };
+        return GraphBuilder::add_convolution_node(s.graph(), common_params, input,
+                                                  Size2D(_conv_width, _conv_height), _ofm, _conv_info,
+                                                  s.hints().convolution_method_hint,
+                                                  std::move(_weights), std::move(_bias));
+    }
+
+private:
+    unsigned int        _conv_width;
+    unsigned int        _conv_height;
+    unsigned int        _ofm;
+    const PadStrideInfo _conv_info;
+    unsigned int        _num_groups;
+    ITensorAccessorUPtr _weights;
+    ITensorAccessorUPtr _bias;
+};
+
+/** Depthwise Convolution Layer */
+class DepthwiseConvolutionLayer final : public ILayer
+{
+public:
+    DepthwiseConvolutionLayer(unsigned int        conv_width,
+                              unsigned int        conv_height,
+                              ITensorAccessorUPtr weights,
+                              ITensorAccessorUPtr bias,
+                              PadStrideInfo       conv_info)
+        : _conv_width(conv_width),
+          _conv_height(conv_height),
+          _conv_info(std::move(conv_info)),
+          _weights(std::move(weights)),
+          _bias(std::move(bias))
+    {
+    }
+
+    NodeID create_layer(IStream &s) override
+    {
+        NodeIdxPair input         = { s.tail_node(), 0 };
+        NodeParams  common_params = { "", s.hints().target_hint };
+        return GraphBuilder::add_depthwise_convolution_node(s.graph(), common_params,
+                                                            input, Size2D(_conv_width, _conv_height), _conv_info,
+                                                            s.hints().depthwise_convolution_method_hint,
+                                                            std::move(_weights), std::move(_bias));
+    }
+
+private:
+    unsigned int        _conv_width;
+    unsigned int        _conv_height;
+    const PadStrideInfo _conv_info;
+    ITensorAccessorUPtr _weights;
+    ITensorAccessorUPtr _bias;
+};
+
+/** Flatten Layer */
+class FlattenLayer final : public ILayer
+{
+public:
+    FlattenLayer()
+    {
+    }
+
+    NodeID create_layer(IStream &s) override
+    {
+        NodeParams  common_params = { "", s.hints().target_hint };
+        NodeIdxPair input         = { s.tail_node(), 0 };
+        return GraphBuilder::add_flatten_node(s.graph(), common_params, input);
+    }
+};
+
+/** Fully Connected Layer */
+class FullyConnectedLayer final : public ILayer
+{
+public:
+    FullyConnectedLayer(unsigned int        num_outputs,
+                        ITensorAccessorUPtr weights,
+                        ITensorAccessorUPtr bias)
+        : _num_outputs(num_outputs), _weights(std::move(weights)), _bias(std::move(bias))
+    {
+    }
+
+    NodeID create_layer(IStream &s) override
+    {
+        NodeParams  common_params = { "", s.hints().target_hint };
+        NodeIdxPair input         = { s.tail_node(), 0 };
+        return GraphBuilder::add_fully_connected_layer(s.graph(), common_params, input, _num_outputs,
+                                                       std::move(_weights), std::move(_bias));
+    }
+
+private:
+    unsigned int        _num_outputs;
+    ITensorAccessorUPtr _weights;
+    ITensorAccessorUPtr _bias;
+};
+
+/** Normalization Layer */
+class NormalizationLayer final : public ILayer
+{
+public:
+    NormalizationLayer(NormalizationLayerInfo norm_info)
+        : _norm_info(norm_info)
+    {
+    }
+
+    NodeID create_layer(IStream &s) override
+    {
+        NodeParams  common_params = { "", s.hints().target_hint };
+        NodeIdxPair input         = { s.tail_node(), 0 };
+        return GraphBuilder::add_normalization_node(s.graph(), common_params, input, _norm_info);
+    }
+
+private:
+    NormalizationLayerInfo _norm_info;
+};
+
+/** Pooling Layer */
+class PoolingLayer final : public ILayer
+{
+public:
+    PoolingLayer(PoolingLayerInfo pool_info)
+        : _pool_info(pool_info)
+    {
+    }
+
+    NodeID create_layer(IStream &s) override
+    {
+        NodeParams  common_params = { "", s.hints().target_hint };
+        NodeIdxPair input         = { s.tail_node(), 0 };
+        return GraphBuilder::add_pooling_node(s.graph(), common_params, input, _pool_info);
+    }
+
+private:
+    PoolingLayerInfo _pool_info;
+};
+
+/** Reshape Layer */
+class ReshapeLayer final : public ILayer
+{
+public:
+    ReshapeLayer(TensorShape shape)
+        : _shape(shape)
+    {
+    }
+
+    NodeID create_layer(IStream &s) override
+    {
+        NodeParams  common_params = { "", s.hints().target_hint };
+        NodeIdxPair input         = { s.tail_node(), 0 };
+        return GraphBuilder::add_reshape_node(s.graph(), common_params, input, _shape);
+    }
+
+private:
+    TensorShape _shape;
+};
+
+/** Softmax Layer */
+class SoftmaxLayer final : public ILayer
+{
+public:
+    SoftmaxLayer(float beta = 1.0f)
+        : _beta(beta)
+    {
+    }
+
+    NodeID create_layer(IStream &s) override
+    {
+        NodeParams  common_params = { "", s.hints().target_hint };
+        NodeIdxPair input         = { s.tail_node(), 0 };
+        return GraphBuilder::add_softmax_node(s.graph(), common_params, input, _beta);
+    }
+
+private:
+    float _beta;
+};
+
+/** Branch Layer */
+class BranchLayer final : public ILayer
+{
+public:
+    /** Default Constructor
+     *
+     * @param[in] merge_method     Branch merging method
+     * @param[in] sub_stream1      First graph branch
+     * @param[in] sub_stream2      Second graph branch
+     * @param[in] rest_sub_streams Rest sub-graph branches
+     */
+    template <typename... Ts>
+    BranchLayer(BranchMergeMethod merge_method, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
+        : _branch_merge_method(merge_method), _sub_streams()
+    {
+        _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream1)));
+        _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream2)));
+
+        utility::for_each([&](SubStream && sub_stream)
+        {
+            _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
+        },
+        std::move(rest_sub_streams)...);
+    }
+    /** Constructor for a single branch
+     *
+     * @param[in] sub_stream Sub-graph branch to forward
+     */
+    template <typename... Ts>
+    BranchLayer(SubStream &&sub_stream)
+        : _branch_merge_method(BranchMergeMethod::DEPTH_CONCATENATE), _sub_streams()
+    {
+        _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
+    }
+    NodeID create_layer(IStream &s) override
+    {
+        NodeID     nid           = EmptyNodeID;
+        NodeParams common_params = { "", s.hints().target_hint };
+        if(_sub_streams.size() == 1 && _sub_streams.at(0) != nullptr)
+        {
+            nid = _sub_streams[0]->tail_node();
+        }
+        else if(_branch_merge_method == BranchMergeMethod::DEPTH_CONCATENATE)
+        {
+            // Collect tail nodes and perform DepthConcatenate
+            std::vector<NodeIdxPair> nodes;
+            for(auto &ss : _sub_streams)
+            {
+                if(ss && (ss->tail_node() != EmptyNodeID))
+                {
+                    const auto tail_node = s.graph().node(ss->tail_node());
+                    if(tail_node != nullptr && tail_node->type() != NodeType::Output)
+                    {
+                        nodes.push_back({ ss->tail_node(), 0 });
+                    }
+                }
+            }
+            nid = GraphBuilder::add_depth_concatenate_node(s.graph(), common_params, nodes);
+        }
+        else
+        {
+            ARM_COMPUTE_ERROR_ON(_sub_streams.size() != 2);
+            NodeIdxPair input0 = { _sub_streams[0]->tail_node(), 0 };
+            NodeIdxPair input1 = { _sub_streams[1]->tail_node(), 0 };
+            nid                = GraphBuilder::add_elementwise_node(s.graph(), common_params, input0, input1, EltwiseOperation::ADD);
+        }
+        return nid;
+    }
+
+private:
+    BranchMergeMethod                       _branch_merge_method;
+    std::vector<std::unique_ptr<SubStream>> _sub_streams;
+};
+} // namespace frontend
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_LAYERS_H__ */
diff --git a/arm_compute/graph2/frontend/Stream.h b/arm_compute/graph2/frontend/Stream.h
new file mode 100644
index 0000000..6100975
--- /dev/null
+++ b/arm_compute/graph2/frontend/Stream.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_STREAM_H__
+#define __ARM_COMPUTE_GRAPH2_STREAM_H__
+
+#include "arm_compute/graph2/frontend/IStream.h"
+#include "arm_compute/graph2/frontend/IStreamOperators.h"
+#include "arm_compute/graph2/frontend/Types.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/graph2/GraphManager.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace frontend
+{
+// Forward Declarations
+class ILayer;
+
+/** Stream frontend class to construct simple graphs in a stream fashion */
+class Stream final : public IStream
+{
+public:
+    /** Constructor
+     *
+     * @param[in] id   Stream id
+     * @param[in] name Stream name
+     */
+    Stream(size_t id, std::string name);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    Stream(const Stream &) = delete;
+    /** Default move constructor */
+    Stream(Stream &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    Stream &operator=(const Stream &) = delete;
+    /** Default move assignment operator */
+    Stream &operator=(Stream &&) = default;
+    /** Finalizes the stream for an execution target
+     *
+     * @note enable_tuning only works if the target is OpenCL.
+     * @note tuning increases the execution time of first run of the graph
+     *
+     * @param[in] target                   Execution target
+     * @param[in] enable_tuning            (Optional) Enables the tuning interface. Defaults to false
+     * @param[in] enable_memory_management (Optional) Enables the memory management interface. Defaults to false
+     */
+    void finalize(Target target, bool enable_tuning = false, bool enable_memory_management = false);
+    /** Executes the stream **/
+    void run();
+
+    // Inherited overridden methods
+    void add_layer(ILayer &layer) override;
+    Graph       &graph() override;
+    const Graph &graph() const override;
+
+private:
+    GraphManager _manager; /**< Graph manager */
+    GraphContext _ctx;     /**< Graph context to use */
+    Graph        _g;       /**< Internal graph representation of the stream */
+};
+} // namespace frontend
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_STREAM_H__ */
\ No newline at end of file
diff --git a/arm_compute/graph2/frontend/SubStream.h b/arm_compute/graph2/frontend/SubStream.h
new file mode 100644
index 0000000..dee09b7
--- /dev/null
+++ b/arm_compute/graph2/frontend/SubStream.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_SUB_STREAM_H__
+#define __ARM_COMPUTE_GRAPH2_SUB_STREAM_H__
+
+#include "arm_compute/graph2/frontend/IStream.h"
+#include "arm_compute/graph2/frontend/IStreamOperators.h"
+#include "arm_compute/graph2/frontend/Types.h"
+
+#include <memory>
+#include <vector>
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class Graph;
+
+namespace frontend
+{
+// Forward declarations
+class ILayer;
+
+/** Sub stream class*/
+class SubStream final : public IStream
+{
+public:
+    /** Default Constructor
+     *
+     * @param[in] s Parent stream
+     */
+    SubStream(IStream &s);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    SubStream(const SubStream &) = delete;
+    /** Default move constructor */
+    SubStream(SubStream &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    SubStream &operator=(const SubStream &) = delete;
+    /** Default move assignment operator */
+    SubStream &operator=(SubStream &&) = default;
+
+    // Inherited overridden methods
+    void add_layer(ILayer &layer) override;
+    Graph       &graph() override;
+    const Graph &graph() const override;
+
+private:
+    IStream &_s; /**< Parent stream (assume that the lifetime of the parent is longer) */
+};
+} // namespace frontend
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_SUB_STREAM_H__ */
diff --git a/arm_compute/graph2/frontend/Types.h b/arm_compute/graph2/frontend/Types.h
new file mode 100644
index 0000000..234b998
--- /dev/null
+++ b/arm_compute/graph2/frontend/Types.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_STREAM_TYPES_H__
+#define __ARM_COMPUTE_GRAPH2_STREAM_TYPES_H__
+
+#include "arm_compute/graph2/Types.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace frontend
+{
+// Import types for graph
+using graph2::DataType;
+using graph2::TensorShape;
+
+using graph2::ActivationLayerInfo;
+using graph2::NormalizationLayerInfo;
+using graph2::NormType;
+using graph2::PadStrideInfo;
+using graph2::PoolingLayerInfo;
+using graph2::PoolingType;
+using graph2::Target;
+using graph2::ConvolutionMethod;
+using graph2::DepthwiseConvolutionMethod;
+using graph2::TensorDescriptor;
+using graph2::DimensionRoundingType;
+
+/** Branch layer merging method */
+enum class BranchMergeMethod
+{
+    DEPTH_CONCATENATE, /**< Concatenate across depth */
+    ADD                /**< Add the results of each stream */
+};
+
+/** Hints that can be passed to the stream to parameterize its behavior */
+struct StreamHints
+{
+    Target                     target_hint                       = { Target::UNSPECIFIED };                 /**< Target execution hint */
+    ConvolutionMethod          convolution_method_hint           = { ConvolutionMethod::DEFAULT };          /**< Convolution method hint */
+    DepthwiseConvolutionMethod depthwise_convolution_method_hint = { DepthwiseConvolutionMethod::DEFAULT }; /**< Depthwise Convolution method hint */
+};
+} // namespace frontend
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_STREAM_TYPES_H__ */
diff --git a/arm_compute/graph2/mutators/DepthConcatSubTensorMutator.h b/arm_compute/graph2/mutators/DepthConcatSubTensorMutator.h
new file mode 100644
index 0000000..bdf2e01
--- /dev/null
+++ b/arm_compute/graph2/mutators/DepthConcatSubTensorMutator.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_DEPTH_CONCAT_SUBTENSOR_MUTATOR_H__
+#define __ARM_COMPUTE_GRAPH2_DEPTH_CONCAT_SUBTENSOR_MUTATOR_H__
+
+#include "arm_compute/graph2/IGraphMutator.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+/** Mutation pass to optimize depth concatenation operations by using sub-tensors
+ *
+ * @warning Always run as one of the last mutation passes as optimizations might change the parent of sub-tensors.
+ */
+class DepthConcatSubTensorMutator final : public IGraphMutator
+{
+public:
+    // Inherited methods overridden:
+    virtual void mutate(Graph &g) override;
+    const char *name() override;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_DEPTH_CONCAT_SUBTENSOR_MUTATOR_H__ */
diff --git a/arm_compute/graph2/mutators/GraphMutators.h b/arm_compute/graph2/mutators/GraphMutators.h
new file mode 100644
index 0000000..b432e32
--- /dev/null
+++ b/arm_compute/graph2/mutators/GraphMutators.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_GRAPH_MUTATORS_H__
+#define __ARM_COMPUTE_GRAPH2_GRAPH_MUTATORS_H__
+
+#include "arm_compute/graph2/mutators/DepthConcatSubTensorMutator.h"
+#include "arm_compute/graph2/mutators/InPlaceOperationMutator.h"
+#include "arm_compute/graph2/mutators/NodeFusionMutator.h"
+
+#endif /* __ARM_COMPUTE_GRAPH2_GRAPH_MUTATORS_H__ */
diff --git a/arm_compute/graph2/mutators/InPlaceOperationMutator.h b/arm_compute/graph2/mutators/InPlaceOperationMutator.h
new file mode 100644
index 0000000..7e4018a
--- /dev/null
+++ b/arm_compute/graph2/mutators/InPlaceOperationMutator.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_INPLACE_OPERATION_MUTATOR_H__
+#define __ARM_COMPUTE_GRAPH2_INPLACE_OPERATION_MUTATOR_H__
+
+#include "arm_compute/graph2/IGraphMutator.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+/** Mutation pass to optimize operations that can be performed in-place */
+class InPlaceOperationMutator final : public IGraphMutator
+{
+public:
+    // Inherited methods overridden:
+    virtual void mutate(Graph &g) override;
+    const char *name() override;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_INPLACE_OPERATION_MUTATOR_H__ */
diff --git a/arm_compute/graph2/mutators/NodeFusionMutator.h b/arm_compute/graph2/mutators/NodeFusionMutator.h
new file mode 100644
index 0000000..4371bd3
--- /dev/null
+++ b/arm_compute/graph2/mutators/NodeFusionMutator.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_NODE_FUSION_MUTATOR_H__
+#define __ARM_COMPUTE_GRAPH2_NODE_FUSION_MUTATOR_H__
+
+#include "arm_compute/graph2/IGraphMutator.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace detail
+{
+/** Fuses batch normalization with activation
+ *
+ * @param[in] g Graph to perform operation fusion on
+ */
+void fuse_batch_norm_with_activation(Graph &g);
+} // namespace detail
+
+/** Mutation pass to fuse nodes */
+class NodeFusionMutator final : public IGraphMutator
+{
+public:
+    // Inherited methods overridden:
+    virtual void mutate(Graph &g) override;
+    const char *name() override;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_NODE_FUSION_MUTATOR_H__ */
diff --git a/arm_compute/graph2/nodes/ActivationLayerNode.h b/arm_compute/graph2/nodes/ActivationLayerNode.h
new file mode 100644
index 0000000..c377523
--- /dev/null
+++ b/arm_compute/graph2/nodes/ActivationLayerNode.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_ACTIVATION_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_ACTIVATION_LAYER_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class ActivationLayerNode final : public INode /* Activation Layer node */
+{
+public:
+    /** Constructor
+     *
+     * @param[in] info Activation Layer information
+     */
+    ActivationLayerNode(ActivationLayerInfo info);
+    /** Activation metadata accessor
+     *
+     * @return The activation info of the layer
+     */
+    ActivationLayerInfo activation_info() const;
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    ActivationLayerInfo _info; /**< Activation Layer information */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_ACTIVATION_LAYER_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/BatchNormalizationLayerNode.h b/arm_compute/graph2/nodes/BatchNormalizationLayerNode.h
new file mode 100644
index 0000000..a521938
--- /dev/null
+++ b/arm_compute/graph2/nodes/BatchNormalizationLayerNode.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_BATCH_NORMALIZATION_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_BATCH_NORMALIZATION_LAYER_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class BatchNormalizationLayerNode final : public INode /* Batch Normalization Layer node */
+{
+public:
+    /** Constructor
+     *
+     * @param[in] epsilon          (Optional) Epsilon parameter. Defaults to 1.f
+     * @param[in] fused_activation (Optional) Fused activation layer. Disabled if not specified
+     */
+    BatchNormalizationLayerNode(float epsilon = 1.f, ActivationLayerInfo fused_activation = ActivationLayerInfo());
+    /** Epsilon parameter accessor
+     *
+     * @return Epsilon parameter
+     */
+    float epsilon() const;
+    /** Returns fused activation
+     *
+     * @return Fused activation
+     */
+    ActivationLayerInfo fused_activation() const;
+    /** Sets fused activation
+     *
+     * @param[in] fused_activation Fused activation to set
+     */
+    void set_fused_activation(ActivationLayerInfo fused_activation);
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    float               _epsilon;          /**< Epsilon parameter */
+    ActivationLayerInfo _fused_activation; /**< Fused activation (disabled by default) */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_BATCH_NORMALIZATION_LAYER_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/ConstNode.h b/arm_compute/graph2/nodes/ConstNode.h
new file mode 100644
index 0000000..73a2246
--- /dev/null
+++ b/arm_compute/graph2/nodes/ConstNode.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_CONST_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_CONST_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class ConstNode final : public INode /* Constant data node */
+{
+public:
+    /** Constructor
+     *
+     * @param[in] desc Tensor descriptor
+     */
+    ConstNode(TensorDescriptor desc);
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    TensorDescriptor _desc; /**< Tensor descriptor */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_CONST_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/ConvolutionLayerNode.h b/arm_compute/graph2/nodes/ConvolutionLayerNode.h
new file mode 100644
index 0000000..1af344e
--- /dev/null
+++ b/arm_compute/graph2/nodes/ConvolutionLayerNode.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_CONVOLUTION_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_CONVOLUTION_LAYER_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class ConvolutionLayerNode final : public INode /* Convolution Layer node */
+{
+public:
+    /** Constructor
+     *
+     * @param[in] info   Convolution layer attributes
+     * @param[in] method (Optional) Convolution method to use
+     */
+    ConvolutionLayerNode(PadStrideInfo info, ConvolutionMethod method = ConvolutionMethod::DEFAULT);
+    /** Sets the convolution layer method to use
+     *
+     * @param[in] method Method to use for convolution
+     */
+    void set_convolution_method(ConvolutionMethod method);
+    /** Convolution layer method accessor
+     *
+     * @note This is an indication on which convolution layer implementation to use,
+     *       if it fails to be created the library's heuristic approach will be used
+     *
+     * @return Convolution layer method to be used by the node
+     */
+    ConvolutionMethod convolution_method() const;
+    /** Convolution metadata accessor
+     *
+     * @return Convolution information
+     */
+    PadStrideInfo convolution_info() const;
+    /** Computes convolution output shape
+     *
+     * @param[in] input_shape   Input shape
+     * @param[in] weights_shape Weights shape
+     * @param[in] info          Convolution operation attributes
+     *
+     * @return Output shape
+     */
+    static TensorShape compute_output_shape(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo info);
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    PadStrideInfo     _info;   /**< Convolution layer attributes */
+    ConvolutionMethod _method; /**< Convolution method to use */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_CONVOLUTION_LAYER_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/DepthConcatenateLayerNode.h b/arm_compute/graph2/nodes/DepthConcatenateLayerNode.h
new file mode 100644
index 0000000..617b984
--- /dev/null
+++ b/arm_compute/graph2/nodes/DepthConcatenateLayerNode.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_DEPTH_CONCATENATE_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_DEPTH_CONCATENATE_LAYER_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class DepthConcatenateLayerNode final : public INode /* Depth Concatenation Layer node */
+{
+public:
+    /** Constructor
+     *
+     * @param[in] total_nodes Number of nodes that will get concatenated
+     */
+    DepthConcatenateLayerNode(unsigned int total_nodes);
+    /** Computes depth concatenation output shape
+     *
+     * @param[in] input_shapes Shapes of the inputs
+     *
+     * @return Expected output shape
+     */
+    static TensorShape compute_output_shape(const std::vector<TensorShape> &input_shapes);
+    /** Disables or not the depth concatenate node
+     *
+     * @warning This is used when depth concatenate is performed with sub-tensors,
+     *          where this node is used as a placeholder.
+     *
+     * @param[in] is_enabled If true a backend function is created to perform the depth concatenation (involves copying),
+     *                       while if false, no function is created and we assume that subtensors are properly set to simulate
+     *                       a no copy operation.
+     */
+    void set_enabled(bool is_enabled);
+    /** Enabled parameter accessor
+     *
+     * @return True if a backend function is to be created else false
+     */
+    bool is_enabled() const;
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    unsigned int _total_nodes; /**< Number of nodes that will get concatenated */
+    bool         _is_enabled;  /**< True if a backend function is to be created */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_DEPTH_CONCATENATE_LAYER_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/DepthwiseConvolutionLayerNode.h b/arm_compute/graph2/nodes/DepthwiseConvolutionLayerNode.h
new file mode 100644
index 0000000..1b05edf
--- /dev/null
+++ b/arm_compute/graph2/nodes/DepthwiseConvolutionLayerNode.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_DEPTHWISE_CONVOLUTION_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_DEPTHWISE_CONVOLUTION_LAYER_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class DepthwiseConvolutionLayerNode final : public INode /* Depthwise Convolution Layer node */
+{
+public:
+    /** Constructor
+     *
+     * @param[in] info   Convolution layer attributes
+     * @param[in] method Depthwise convolution method to use
+     */
+    DepthwiseConvolutionLayerNode(PadStrideInfo info, DepthwiseConvolutionMethod method = DepthwiseConvolutionMethod::DEFAULT);
+    /** Sets the depthwise convolution method to use
+     *
+     * @param[in] method Depthwise convolution method to use
+     */
+    void set_depthwise_convolution_method(DepthwiseConvolutionMethod method);
+    /** Depthwise convolution layer method accessor
+     *
+     * @note This is an indication on which depthwise implementation to use,
+     *       if it fails to be created the generic approach will be used
+     *
+     * @return Depthwise convolution layer method to be used by the node
+     */
+    DepthwiseConvolutionMethod depthwise_convolution_method() const;
+    /** Convolution metadata accessor
+     *
+     * @return Convolution information
+     */
+    PadStrideInfo convolution_info() const;
+    /** Computes depthwise convolution output shape
+     *
+     * @param[in] input_shape   Input shape
+     * @param[in] weights_shape Weights shape
+     * @param[in] info          Convolution operation attributes
+     *
+     * @return Output shape
+     */
+    static TensorShape compute_output_shape(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo info);
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    PadStrideInfo              _info;   /**< Convolution layer attributes */
+    DepthwiseConvolutionMethod _method; /**< Depthwise convolution method to use */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_DEPTHWISE_CONVOLUTION_LAYER_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/EltwiseLayerNode.h b/arm_compute/graph2/nodes/EltwiseLayerNode.h
new file mode 100644
index 0000000..2b217de
--- /dev/null
+++ b/arm_compute/graph2/nodes/EltwiseLayerNode.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_ELTWISE_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_ELTWISE_LAYER_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class EltwiseLayerNode final : public INode /* Element-wise operation Layer node */
+{
+public:
+    /** Constructor
+     *
+     * @param[in] op Element-wise operation to perform
+     */
+    EltwiseLayerNode(EltwiseOperation op);
+    /** Eltwise operation accessor
+     *
+     * @return Eltwise operation that is to be performed by the node
+     */
+    EltwiseOperation eltwise_operation() const;
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    EltwiseOperation _op; /**< Element-wise operation to perform */
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_ELTWISE_LAYER_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/FlattenLayerNode.h b/arm_compute/graph2/nodes/FlattenLayerNode.h
new file mode 100644
index 0000000..de601f5
--- /dev/null
+++ b/arm_compute/graph2/nodes/FlattenLayerNode.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_FLATTEN_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_FLATTEN_LAYER_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class FlattenLayerNode final : public INode
+{
+public:
+    /** Default Constructor */
+    FlattenLayerNode();
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_FLATTEN_LAYER_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/FullyConnectedLayerNode.h b/arm_compute/graph2/nodes/FullyConnectedLayerNode.h
new file mode 100644
index 0000000..836f20f
--- /dev/null
+++ b/arm_compute/graph2/nodes/FullyConnectedLayerNode.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_FULLY_CONNECTED_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_FULLY_CONNECTED_LAYER_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class FullyConnectedLayerNode final : public INode
+{
+public:
+    /** Constructor
+     *
+     * @param[in] num_outputs Number of neurons in the layer
+     */
+    FullyConnectedLayerNode(unsigned int num_outputs);
+    /** Computes weights shape
+     *
+     * @warning Works for inputs with 1D batch space
+     *
+     * @param[in] input_shape Input shape
+     * @param[in] num_outputs Number of output neurons
+     *
+     * @return Weights shape
+     */
+    static TensorShape compute_weights_shape(TensorShape input_shape, unsigned int num_outputs);
+    /** Computes fully connected layer output shape
+     *
+     * @warning Works for inputs with 1D batch space
+     *
+     * @param[in] input_shape Input shape
+     * @param[in] num_outputs Number of output neurons
+     *
+     * @return Output shape
+     */
+    static TensorShape compute_output_shape(TensorShape input_shape, unsigned int num_outputs);
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    unsigned int _num_outputs;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_FULLY_CONNECTED_LAYER_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/InputNode.h b/arm_compute/graph2/nodes/InputNode.h
new file mode 100644
index 0000000..2cad6f8
--- /dev/null
+++ b/arm_compute/graph2/nodes/InputNode.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_INPUT_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_INPUT_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class InputNode final : public INode
+{
+public:
+    /** Constructor
+     *
+     * @param[in] desc Tensor descriptor
+     */
+    InputNode(TensorDescriptor desc);
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    TensorDescriptor _desc;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_INPUT_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/Nodes.h b/arm_compute/graph2/nodes/Nodes.h
new file mode 100644
index 0000000..8201361
--- /dev/null
+++ b/arm_compute/graph2/nodes/Nodes.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_NODES_H__
+#define __ARM_COMPUTE_GRAPH2_NODES_H__
+
+#include "arm_compute/graph2/nodes/ActivationLayerNode.h"
+#include "arm_compute/graph2/nodes/BatchNormalizationLayerNode.h"
+#include "arm_compute/graph2/nodes/ConstNode.h"
+#include "arm_compute/graph2/nodes/ConvolutionLayerNode.h"
+#include "arm_compute/graph2/nodes/DepthConcatenateLayerNode.h"
+#include "arm_compute/graph2/nodes/DepthwiseConvolutionLayerNode.h"
+#include "arm_compute/graph2/nodes/EltwiseLayerNode.h"
+#include "arm_compute/graph2/nodes/FlattenLayerNode.h"
+#include "arm_compute/graph2/nodes/FullyConnectedLayerNode.h"
+#include "arm_compute/graph2/nodes/InputNode.h"
+#include "arm_compute/graph2/nodes/NormalizationLayerNode.h"
+#include "arm_compute/graph2/nodes/OutputNode.h"
+#include "arm_compute/graph2/nodes/PoolingLayerNode.h"
+#include "arm_compute/graph2/nodes/ReshapeLayerNode.h"
+#include "arm_compute/graph2/nodes/SoftmaxLayerNode.h"
+
+#endif /* __ARM_COMPUTE_GRAPH2_NODES_H__ */
diff --git a/arm_compute/graph2/nodes/NodesFwd.h b/arm_compute/graph2/nodes/NodesFwd.h
new file mode 100644
index 0000000..03ca65e
--- /dev/null
+++ b/arm_compute/graph2/nodes/NodesFwd.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_NODES_FWD_H__
+#define __ARM_COMPUTE_GRAPH2_NODES_FWD_H__
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class INode;
+class ActivationLayerNode;
+class BatchNormalizationLayerNode;
+class ConstNode;
+class ConvolutionLayerNode;
+class DepthConcatenateLayerNode;
+class DepthwiseConvolutionLayerNode;
+class EltwiseLayerNode;
+class FlattenLayerNode;
+class FullyConnectedLayerNode;
+class InputNode;
+class NormalizationLayerNode;
+class OutputNode;
+class PoolingLayerNode;
+class ReshapeLayerNode;
+class SoftmaxLayerNode;
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_NODES_FWD_H__ */
diff --git a/arm_compute/graph2/nodes/NormalizationLayerNode.h b/arm_compute/graph2/nodes/NormalizationLayerNode.h
new file mode 100644
index 0000000..e2816e9
--- /dev/null
+++ b/arm_compute/graph2/nodes/NormalizationLayerNode.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_NORMALIZATION_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_NORMALIZATION_LAYER_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class NormalizationLayerNode final : public INode
+{
+public:
+    /** Constructor
+     *
+     * @param[in] norm_info Normalization Layer information
+     */
+    NormalizationLayerNode(NormalizationLayerInfo norm_info);
+    /** Normalization info accessor
+     *
+     * @return Normalization layer info
+     */
+    NormalizationLayerInfo normalization_info() const;
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    NormalizationLayerInfo _info;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_NORMALIZATION_LAYER_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/OutputNode.h b/arm_compute/graph2/nodes/OutputNode.h
new file mode 100644
index 0000000..94df382
--- /dev/null
+++ b/arm_compute/graph2/nodes/OutputNode.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_OUTPUT_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_OUTPUT_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class OutputNode final : public INode
+{
+public:
+    /** Default Constructor */
+    OutputNode();
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_OUTPUT_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/PoolingLayerNode.h b/arm_compute/graph2/nodes/PoolingLayerNode.h
new file mode 100644
index 0000000..b0c6270
--- /dev/null
+++ b/arm_compute/graph2/nodes/PoolingLayerNode.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_POOLING_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_POOLING_LAYER_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class PoolingLayerNode final : public INode
+{
+public:
+    /** Constructor
+     *
+     * @param[in] pool_info Pooling Layer information
+     */
+    PoolingLayerNode(PoolingLayerInfo pool_info);
+    /** Pooling metadata accessor
+     *
+     * @return Pooling Layer info
+     */
+    PoolingLayerInfo pooling_info() const;
+    /** Computes pooling output shape
+     *
+     * @param[in] input_shape Input shape
+     * @param[in] info        Pooling operation attributes
+     *
+     * @return Output shape
+     */
+    static TensorShape compute_output_shape(TensorShape input_shape, PoolingLayerInfo info);
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    PoolingLayerInfo _info;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_POOLING_LAYER_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/ReshapeLayerNode.h b/arm_compute/graph2/nodes/ReshapeLayerNode.h
new file mode 100644
index 0000000..89ee46c
--- /dev/null
+++ b/arm_compute/graph2/nodes/ReshapeLayerNode.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_RESHAPE_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_RESHAPE_LAYER_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class ReshapeLayerNode final : public INode
+{
+public:
+    /** Constructor
+     *
+     * @param[in] shape Reshaped tensor shape
+     */
+    ReshapeLayerNode(TensorShape shape);
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    TensorShape _shape;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_RESHAPE_LAYER_NODE_H__ */
diff --git a/arm_compute/graph2/nodes/SoftmaxLayerNode.h b/arm_compute/graph2/nodes/SoftmaxLayerNode.h
new file mode 100644
index 0000000..86decb8
--- /dev/null
+++ b/arm_compute/graph2/nodes/SoftmaxLayerNode.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_SOFTMAX_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH2_SOFTMAX_LAYER_NODE_H__
+
+#include "arm_compute/graph2/INode.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+class SoftmaxLayerNode final : public INode
+{
+public:
+    /** Constructor
+     *
+     * @param[in] beta (Optional) Beta parameter. Defaults to 1
+     */
+    SoftmaxLayerNode(float beta = 1.f);
+    /** Beta parameter accessor
+     *
+     * @return Beta parameter
+     */
+    float beta() const;
+
+    // Inherited overridden methods:
+    Status           validate() override;
+    NodeType         type() const override;
+    bool             forward_descriptors() override;
+    TensorDescriptor configure_output(size_t idx) const override;
+    void accept(INodeVisitor &v) override;
+
+private:
+    float _beta;
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_SOFTMAX_LAYER_NODE_H__ */
diff --git a/arm_compute/graph2/printers/DotGraphPrinter.h b/arm_compute/graph2/printers/DotGraphPrinter.h
new file mode 100644
index 0000000..3b1879c
--- /dev/null
+++ b/arm_compute/graph2/printers/DotGraphPrinter.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_DOTGRAPHPRINTER_H__
+#define __ARM_COMPUTE_GRAPH2_DOTGRAPHPRINTER_H__
+
+#include "arm_compute/graph2/IGraphPrinter.h"
+
+#include "arm_compute/graph2/INodeVisitor.h"
+
+#include <string>
+
+namespace arm_compute
+{
+namespace graph2
+{
+class DotGraphVisitor final : public DefaultNodeVisitor
+{
+public:
+    /** Default Constructor */
+    DotGraphVisitor() = default;
+    /** Returns the output information of the last visited node
+     *
+     * @return Information of the last visited node
+     */
+    const std::string &info() const;
+
+    // Reveal parent method
+    using DefaultNodeVisitor::visit;
+
+    // Inherited overridden methods:
+    void visit(ActivationLayerNode &n) override;
+    void visit(BatchNormalizationLayerNode &n) override;
+    void visit(ConvolutionLayerNode &n) override;
+    void visit(DepthConcatenateLayerNode &n) override;
+    void visit(DepthwiseConvolutionLayerNode &n) override;
+    void visit(EltwiseLayerNode &n) override;
+    void visit(NormalizationLayerNode &n) override;
+    void visit(PoolingLayerNode &n) override;
+    void default_visit() override;
+
+private:
+    std::string _info{};
+};
+
+/** Graph printer interface */
+class DotGraphPrinter final : public IGraphPrinter
+{
+public:
+    // Inherited overridden methods:
+    void print(const Graph &g, std::ostream &os) override;
+
+private:
+    /** Print dot graph header
+     *
+     * @param[in]  g  Graph
+     * @param[out] os Output stream to use
+     */
+    void print_header(const Graph &g, std::ostream &os);
+    /** Print dot graph footer
+     *
+     * @param[in]  g  Graph
+     * @param[out] os Output stream to use
+     */
+    void print_footer(const Graph &g, std::ostream &os);
+    /** Prints nodes in dot format
+     *
+     * @param[in]  g  Graph
+     * @param[out] os Output stream to use
+     */
+    void print_nodes(const Graph &g, std::ostream &os);
+    /** Prints edges in dot format
+     *
+     * @param[in]  g  Graph
+     * @param[out] os Output stream to use
+     */
+    void print_edges(const Graph &g, std::ostream &os);
+
+private:
+    DotGraphVisitor _dot_node_visitor = {};
+};
+} // namespace graph2
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH2_DOTGRAPHPRINTER_H__ */
diff --git a/arm_compute/graph2/printers/Printers.h b/arm_compute/graph2/printers/Printers.h
new file mode 100644
index 0000000..0b70139
--- /dev/null
+++ b/arm_compute/graph2/printers/Printers.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_PRINTERS_H__
+#define __ARM_COMPUTE_GRAPH2_PRINTERS_H__
+
+#include "arm_compute/graph2/printers/DotGraphPrinter.h"
+
+#endif /* __ARM_COMPUTE_GRAPH2_PRINTERS_H__ */
diff --git a/arm_compute/runtime/CL/CLSubTensor.h b/arm_compute/runtime/CL/CLSubTensor.h
index b6e9a29..9c37f8b 100644
--- a/arm_compute/runtime/CL/CLSubTensor.h
+++ b/arm_compute/runtime/CL/CLSubTensor.h
@@ -37,6 +37,8 @@
 class CLSubTensor : public ICLTensor
 {
 public:
+    /** Default Constructor */
+    CLSubTensor();
     /** Constructor
      *
      * @param[in] parent        Parent tensor
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index 5f8830a..ca805d9 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -49,13 +49,12 @@
 
 /** Function to reshape and transpose the weights. This function calls the following kernels:
  * -# @ref CLWeightsReshapeKernel
- * -# @ref CLGEMMTranspose1xWKernel
  */
 class CLConvolutionLayerReshapeWeights : public IFunction
 {
 public:
     /** Constructor */
-    CLConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+    CLConvolutionLayerReshapeWeights();
     /** Set the input and output tensors.
      *
      * @param[in]  weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
@@ -78,10 +77,7 @@
     void run() override;
 
 private:
-    CLMemoryGroup            _memory_group;
-    CLWeightsReshapeKernel   _weights_reshape_kernel;
-    CLGEMMTranspose1xWKernel _weights_transposed_kernel;
-    CLTensor                 _weights_reshaped;
+    CLWeightsReshapeKernel _weights_reshape_kernel;
 };
 
 /** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions:
@@ -169,9 +165,7 @@
     CLCol2ImKernel                                      _col2im_kernel;
 
     CLTensor _im2col_output;
-    CLTensor _interleave_output;
     CLTensor _weights_reshaped;
-    CLTensor _weights_transposed;
     CLTensor _gemm_output;
     CLTensor _tmp_output;
 
diff --git a/arm_compute/runtime/SubTensor.h b/arm_compute/runtime/SubTensor.h
index ba2f868..603783f 100644
--- a/arm_compute/runtime/SubTensor.h
+++ b/arm_compute/runtime/SubTensor.h
@@ -37,6 +37,8 @@
 class SubTensor : public ITensor
 {
 public:
+    /** Default Constructor */
+    SubTensor();
     /** Constructor
      *
      * @param[in] parent        Parent tensor
diff --git a/examples/SConscript b/examples/SConscript
index 9be9fa9..80bce57 100644
--- a/examples/SConscript
+++ b/examples/SConscript
@@ -57,15 +57,17 @@
         alias = examples_env.Alias(example, prog)
         Default(alias)
     if env['os'] == 'android':
+        Import('arm_compute_graph2_a')
         Import('arm_compute_graph_a')
         Import('arm_compute_core_a')
         Import('arm_compute_a')
         arm_compute_graph_libs = [ arm_compute_a, arm_compute_core_a, "OpenCL"]
-        graph_dependency = arm_compute_graph_a
+        graph_dependency = [arm_compute_graph_a, arm_compute_graph2_a]
     else:
+        Import('arm_compute_graph2_so')
         Import('arm_compute_graph_so')
-        arm_compute_graph_libs = ["arm_compute_graph", "arm_compute", "arm_compute_core"]
-        graph_dependency = arm_compute_graph_so
+        arm_compute_graph_libs = ["arm_compute_graph2", "arm_compute_graph", "arm_compute", "arm_compute_core"]
+        graph_dependency = [arm_compute_graph_so, arm_compute_graph2_so]
 
     graph_utils = examples_env.Object("../utils/GraphUtils.cpp")
     for file in Glob("./graph_*.cpp"):
diff --git a/examples/graph_googlenet.cpp b/examples/graph_googlenet.cpp
index de4afa2..d64512b 100644
--- a/examples/graph_googlenet.cpp
+++ b/examples/graph_googlenet.cpp
@@ -21,9 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
-#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph2.h"
 #include "support/ToolchainSupport.h"
 #include "utils/GraphUtils.h"
 #include "utils/Utils.h"
@@ -32,7 +30,7 @@
 #include <tuple>
 
 using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
 using namespace arm_compute::graph_utils;
 
 /** Example demonstrating how to implement Googlenet's network using the Compute Library's graph API
@@ -54,9 +52,11 @@
         std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
 
         // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
-        const int             int_target_hint  = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
-        TargetHint            target_hint      = set_target_hint(int_target_hint);
-        ConvolutionMethodHint convolution_hint = ConvolutionMethodHint::GEMM;
+        const int         target                   = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+        Target            target_hint              = set_target_hint2(target);
+        ConvolutionMethod convolution_hint         = ConvolutionMethod::GEMM;
+        bool              enable_tuning            = (target == 2);
+        bool              enable_memory_management = true;
 
         // Parse arguments
         if(argc < 2)
@@ -91,8 +91,8 @@
         }
 
         graph << target_hint
-              << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
-                        get_input_accessor(image, std::move(preprocessor)))
+              << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
+                            get_input_accessor(image, std::move(preprocessor)))
               << ConvolutionLayer(
                   7U, 7U, 64U,
                   get_weights_accessor(data_path, "/cnn_data/googlenet_model/conv1/conv1_7x7_s2_w.npy"),
@@ -133,10 +133,10 @@
                   get_weights_accessor(data_path, "/cnn_data/googlenet_model/loss3/loss3_classifier_w.npy"),
                   get_weights_accessor(data_path, "/cnn_data/googlenet_model/loss3/loss3_classifier_b.npy"))
               << SoftmaxLayer()
-              << Tensor(get_output_accessor(label, 5));
+              << OutputLayer(get_output_accessor(label, 5));
 
-        // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
-        graph.graph_init(int_target_hint == 2);
+        // Finalize graph
+        graph.finalize(target_hint, enable_tuning, enable_memory_management);
     }
     void do_run() override
     {
@@ -145,7 +145,7 @@
     }
 
 private:
-    Graph graph{};
+    Stream graph{ 0, "GoogleNet" };
 
     BranchLayer get_inception_node(const std::string &data_path, std::string &&param_path,
                                    unsigned int a_filt,
@@ -154,7 +154,7 @@
                                    unsigned int d_filt)
     {
         std::string total_path = "/cnn_data/googlenet_model/" + param_path + "/" + param_path + "_";
-        SubGraph    i_a;
+        SubStream   i_a(graph);
         i_a << ConvolutionLayer(
                 1U, 1U, a_filt,
                 get_weights_accessor(data_path, total_path + "1x1_w.npy"),
@@ -162,7 +162,7 @@
                 PadStrideInfo(1, 1, 0, 0))
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 1U, 1U, std::get<0>(b_filters),
                 get_weights_accessor(data_path, total_path + "3x3_reduce_w.npy"),
@@ -176,7 +176,7 @@
                 PadStrideInfo(1, 1, 1, 1))
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
+        SubStream i_c(graph);
         i_c << ConvolutionLayer(
                 1U, 1U, std::get<0>(c_filters),
                 get_weights_accessor(data_path, total_path + "5x5_reduce_w.npy"),
@@ -190,7 +190,7 @@
                 PadStrideInfo(1, 1, 2, 2))
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_d;
+        SubStream i_d(graph);
         i_d << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL)))
             << ConvolutionLayer(
                 1U, 1U, d_filt,
diff --git a/examples/graph_inception_v3.cpp b/examples/graph_inception_v3.cpp
index a10037b..9bb51ba 100644
--- a/examples/graph_inception_v3.cpp
+++ b/examples/graph_inception_v3.cpp
@@ -21,9 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
-#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph2.h"
 #include "support/ToolchainSupport.h"
 #include "utils/GraphUtils.h"
 #include "utils/Utils.h"
@@ -32,15 +30,15 @@
 #include <tuple>
 
 using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
 using namespace arm_compute::graph_utils;
 
 /** Example demonstrating how to implement InceptionV3's network using the Compute Library's graph API
  *
  * @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels )
+ * @param[in] argv Arguments ( [optional] Path to the weights folder, [optional] image, [optional] labels )
  */
-class InceptionV3Example final : public Example
+class InceptionV3Example : public Example
 {
 public:
     void do_setup(int argc, char **argv) override
@@ -53,8 +51,10 @@
         std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
 
         // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
-        const int  int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
-        TargetHint target_hint     = set_target_hint(int_target_hint);
+        const int target                   = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+        Target    target_hint              = set_target_hint2(target);
+        bool      enable_tuning            = (target == 2);
+        bool      enable_memory_management = true;
 
         // Parse arguments
         if(argc < 2)
@@ -88,8 +88,8 @@
             label     = argv[4];
         }
 
-        graph << target_hint << Tensor(TensorInfo(TensorShape(299U, 299U, 3U, 1U), 1, DataType::F32),
-                                       get_input_accessor(image, std::move(preprocessor), false))
+        graph << target_hint << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), DataType::F32),
+                                           get_input_accessor(image, std::move(preprocessor), false))
 
               << ConvolutionLayer(3U, 3U, 32U,
                                   get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_weights.npy"),
@@ -100,7 +100,8 @@
                                                               "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_BatchNorm_moving_variance.npy"),
                                          get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
                                                                                              "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_BatchNorm_beta.npy"),
-                                         0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                                         0.001f)
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
 
               << ConvolutionLayer(3U, 3U, 32U,
                                   get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_2a_3x3_weights.npy"),
@@ -111,7 +112,8 @@
                                                               "/cnn_data/inceptionv3_model/Conv2d_2a_3x3_BatchNorm_moving_variance.npy"),
                                          get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
                                                                                              "/cnn_data/inceptionv3_model/Conv2d_2a_3x3_BatchNorm_beta.npy"),
-                                         0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                                         0.001f)
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
 
               << ConvolutionLayer(3U, 3U, 64U,
                                   get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_2b_3x3_weights.npy"),
@@ -122,7 +124,8 @@
                                                               "/cnn_data/inceptionv3_model/Conv2d_2b_3x3_BatchNorm_moving_variance.npy"),
                                          get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
                                                                                              "/cnn_data/inceptionv3_model/Conv2d_2b_3x3_BatchNorm_beta.npy"),
-                                         0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                                         0.001f)
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
 
               << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
 
@@ -135,7 +138,8 @@
                                                               "/cnn_data/inceptionv3_model/Conv2d_3b_1x1_BatchNorm_moving_variance.npy"),
                                          get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
                                                                                              "/cnn_data/inceptionv3_model/Conv2d_3b_1x1_BatchNorm_beta.npy"),
-                                         0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                                         0.001f)
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
 
               << ConvolutionLayer(3U, 3U, 192U,
                                   get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_4a_3x3_weights.npy"),
@@ -146,7 +150,8 @@
                                                               "/cnn_data/inceptionv3_model/Conv2d_4a_3x3_BatchNorm_moving_variance.npy"),
                                          get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
                                                                                              "/cnn_data/inceptionv3_model/Conv2d_4a_3x3_BatchNorm_beta.npy"),
-                                         0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                                         0.001f)
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
 
               << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
 
@@ -183,10 +188,10 @@
                                                        "/cnn_data/inceptionv3_model/Logits_Conv2d_1c_1x1_biases.npy"),
                                   PadStrideInfo(1, 1, 0, 0))
               << ReshapeLayer(TensorShape(1001U)) << SoftmaxLayer()
-              << Tensor(get_output_accessor(label, 5));
+              << OutputLayer(get_output_accessor(label, 5));
 
-        // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
-        graph.graph_init(int_target_hint == 2);
+        // Finalize graph
+        graph.finalize(target_hint, enable_tuning, enable_memory_management);
     }
 
     void do_run() override
@@ -195,7 +200,7 @@
     }
 
 private:
-    Graph graph{};
+    Stream graph{ 0, "InceptionV3" };
 
 private:
     BranchLayer get_inception_node_A(const std::string &data_path, std::string &&param_path,
@@ -216,7 +221,7 @@
             conv_id1 = "_1_0c_";
         }
 
-        SubGraph i_a;
+        SubStream i_a(graph);
         i_a << ConvolutionLayer(
                 1U, 1U, a_filt,
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
@@ -227,9 +232,10 @@
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 1U, 1U, std::get<0>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_weights.npy"),
@@ -240,7 +246,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 5U, 5U, std::get<1>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_weights.npy"),
@@ -251,9 +258,10 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
+        SubStream i_c(graph);
         i_c << ConvolutionLayer(
                 1U, 1U, std::get<0>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
@@ -264,7 +272,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<1>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_weights.npy"),
@@ -275,7 +284,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<2>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_weights.npy"),
@@ -286,9 +296,10 @@
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_d;
+        SubStream i_d(graph);
         i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
             << ConvolutionLayer(
                 1U, 1U, d_filt,
@@ -300,7 +311,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
         return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d));
     }
@@ -310,7 +322,7 @@
                                      std::tuple<unsigned int, unsigned int, unsigned int> b_filters)
     {
         std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_";
-        SubGraph    i_a;
+        SubStream   i_a(graph);
         i_a << ConvolutionLayer(
                 3U, 3U, a_filt,
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_weights.npy"),
@@ -321,9 +333,10 @@
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 1U, 1U, std::get<0>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
@@ -334,7 +347,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<1>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_weights.npy"),
@@ -345,7 +359,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<2>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_weights.npy"),
@@ -356,12 +371,11 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
-        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
-            // TODO (geopin01) : Remove once we understand why a single node graph does not run in CL
-            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+        SubStream i_c(graph);
+        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)));
 
         return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c));
     }
@@ -373,7 +387,7 @@
                                      unsigned int d_filt)
     {
         std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_";
-        SubGraph    i_a;
+        SubStream   i_a(graph);
         i_a << ConvolutionLayer(
                 1U, 1U, a_filt,
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
@@ -384,9 +398,10 @@
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 1U, 1U, std::get<0>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
@@ -397,7 +412,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 7U, 1U, std::get<1>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_weights.npy"),
@@ -408,7 +424,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 1U, 7U, std::get<2>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_weights.npy"),
@@ -419,9 +436,10 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
+        SubStream i_c(graph);
         i_c << ConvolutionLayer(
                 1U, 1U, std::get<0>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
@@ -432,7 +450,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 1U, 7U, std::get<1>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_weights.npy"),
@@ -443,7 +462,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 7U, 1U, std::get<2>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_weights.npy"),
@@ -454,7 +474,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 1U, 7U, std::get<3>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_weights.npy"),
@@ -465,7 +486,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 7U, 1U, std::get<4>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_1x7_weights.npy"),
@@ -476,9 +498,10 @@
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_1x7_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_1x7_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_d;
+        SubStream i_d(graph);
         i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
             << ConvolutionLayer(
                 1U, 1U, d_filt,
@@ -490,7 +513,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
         return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d));
     }
@@ -500,7 +524,7 @@
                                      std::tuple<unsigned int, unsigned int, unsigned int, unsigned int> b_filters)
     {
         std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_";
-        SubGraph    i_a;
+        SubStream   i_a(graph);
         i_a << ConvolutionLayer(
                 1U, 1U, std::get<0>(a_filters),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
@@ -511,7 +535,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<1>(a_filters),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_weights.npy"),
@@ -522,9 +547,10 @@
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 1U, 1U, std::get<0>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
@@ -535,7 +561,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 7U, 1U, std::get<1>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_weights.npy"),
@@ -546,7 +573,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 1U, 7U, std::get<2>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_weights.npy"),
@@ -557,7 +585,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<3>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_weights.npy"),
@@ -568,12 +597,11 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
-        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
-            // TODO (geopin01) : Remove once we understand why a single node graph does not run in CL
-            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+        SubStream i_c(graph);
+        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)));
 
         return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c));
     }
@@ -593,7 +621,7 @@
         }
 
         std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_";
-        SubGraph    i_a;
+        SubStream   i_a(graph);
         i_a << ConvolutionLayer(
                 1U, 1U, a_filt,
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
@@ -604,35 +632,10 @@
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b1;
-        i_b1 << ConvolutionLayer(
-                 3U, 1U, std::get<1>(b_filters),
-                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_weights.npy"),
-                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
-                 PadStrideInfo(1, 1, 1, 0))
-             << BatchNormalizationLayer(
-                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_moving_mean.npy"),
-                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_moving_variance.npy"),
-                 get_random_accessor(1.f, 1.f),
-                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_beta.npy"),
-                 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
-
-        SubGraph i_b2;
-        i_b2 << ConvolutionLayer(
-                 1U, 3U, std::get<2>(b_filters),
-                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_weights.npy"),
-                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
-                 PadStrideInfo(1, 1, 0, 1))
-             << BatchNormalizationLayer(
-                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_moving_mean.npy"),
-                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_moving_variance.npy"),
-                 get_random_accessor(1.f, 1.f),
-                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_beta.npy"),
-                 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
-
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 1U, 1U, std::get<0>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
@@ -643,36 +646,41 @@
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
-            << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_b1), std::move(i_b2));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c1;
-        i_c1 << ConvolutionLayer(
-                 3U, 1U, std::get<2>(c_filters),
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_weights.npy"),
+        SubStream i_b1(static_cast<IStream &>(i_b));
+        i_b1 << ConvolutionLayer(
+                 3U, 1U, std::get<1>(b_filters),
+                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_weights.npy"),
                  std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                  PadStrideInfo(1, 1, 1, 0))
              << BatchNormalizationLayer(
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_moving_mean.npy"),
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_moving_variance.npy"),
+                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_moving_mean.npy"),
+                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_moving_variance.npy"),
                  get_random_accessor(1.f, 1.f),
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_beta.npy"),
-                 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_beta.npy"),
+                 0.001f)
+             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c2;
-        i_c2 << ConvolutionLayer(
-                 1U, 3U, std::get<3>(c_filters),
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_weights.npy"),
+        SubStream i_b2(static_cast<IStream &>(i_b));
+        i_b2 << ConvolutionLayer(
+                 1U, 3U, std::get<2>(b_filters),
+                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_weights.npy"),
                  std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                  PadStrideInfo(1, 1, 0, 1))
              << BatchNormalizationLayer(
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_moving_mean.npy"),
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_moving_variance.npy"),
+                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_moving_mean.npy"),
+                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_moving_variance.npy"),
                  get_random_accessor(1.f, 1.f),
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_beta.npy"),
-                 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_beta.npy"),
+                 0.001f)
+             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
+        // Merge i_b1 and i_b2
+        i_b << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_b1), std::move(i_b2));
+
+        SubStream i_c(graph);
         i_c << ConvolutionLayer(
                 1U, 1U, std::get<0>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
@@ -683,7 +691,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<1>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_weights.npy"),
@@ -694,10 +703,41 @@
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
-            << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_c1), std::move(i_c2));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_d;
+        SubStream i_c1(static_cast<IStream &>(i_c));
+        i_c1 << ConvolutionLayer(
+                 3U, 1U, std::get<2>(c_filters),
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_weights.npy"),
+                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+                 PadStrideInfo(1, 1, 1, 0))
+             << BatchNormalizationLayer(
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_moving_mean.npy"),
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_moving_variance.npy"),
+                 get_random_accessor(1.f, 1.f),
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_beta.npy"),
+                 0.001f)
+             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+        SubStream i_c2(static_cast<IStream &>(i_c));
+        i_c2 << ConvolutionLayer(
+                 1U, 3U, std::get<3>(c_filters),
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_weights.npy"),
+                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+                 PadStrideInfo(1, 1, 0, 1))
+             << BatchNormalizationLayer(
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_moving_mean.npy"),
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_moving_variance.npy"),
+                 get_random_accessor(1.f, 1.f),
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_beta.npy"),
+                 0.001f)
+             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+        // Merge i_c1 and i_c2
+        i_c << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_c1), std::move(i_c2));
+
+        SubStream i_d(graph);
         i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
             << ConvolutionLayer(
                 1U, 1U, d_filt,
@@ -709,7 +749,8 @@
                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
         return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d));
     }
diff --git a/examples/graph_inception_v4.cpp b/examples/graph_inception_v4.cpp
index f004b41..d9f6156 100644
--- a/examples/graph_inception_v4.cpp
+++ b/examples/graph_inception_v4.cpp
@@ -21,9 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
-#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph2.h"
 #include "support/ToolchainSupport.h"
 #include "utils/GraphUtils.h"
 #include "utils/Utils.h"
@@ -32,7 +30,7 @@
 #include <tuple>
 
 using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
 using namespace arm_compute::graph_utils;
 
 /** Example demonstrating how to implement InceptionV4's network using the Compute Library's graph API
@@ -52,9 +50,11 @@
         // Create a preprocessor object
         std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
 
-        // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
-        const int  int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
-        TargetHint target_hint     = set_target_hint(int_target_hint);
+        // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
+        const int target                   = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+        Target    target_hint              = set_target_hint2(target);
+        bool      enable_tuning            = (target == 2);
+        bool      enable_memory_management = true;
 
         // Parse arguments
         if(argc < 2)
@@ -88,8 +88,8 @@
             label     = argv[4];
         }
 
-        graph << target_hint << Tensor(TensorInfo(TensorShape(299U, 299U, 3U, 1U), 1, DataType::F32),
-                                       get_input_accessor(image, std::move(preprocessor), false))
+        graph << target_hint << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), DataType::F32),
+                                           get_input_accessor(image, std::move(preprocessor), false))
 
               // Conv2d_1a_3x3
               << ConvolutionLayer(3U, 3U, 32U,
@@ -153,10 +153,10 @@
                   get_weights_accessor(data_path, "/cnn_data/inceptionv4_model/Logits_Logits_weights.npy"),
                   get_weights_accessor(data_path, "/cnn_data/inceptionv4_model/Logits_Logits_biases.npy"))
               << SoftmaxLayer()
-              << Tensor(get_output_accessor(label, 5));
+              << OutputLayer(get_output_accessor(label, 5));
 
-        // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
-        graph.graph_init(int_target_hint == 2);
+        // Finalize graph
+        graph.finalize(target_hint, enable_tuning, enable_memory_management);
     }
 
     void do_run() override
@@ -165,19 +165,17 @@
     }
 
 private:
-    Graph graph{};
+    Stream graph{ 0, "InceptionV4" };
 
 private:
     BranchLayer get_mixed_3a(const std::string &data_path)
     {
         std::string total_path = "/cnn_data/inceptionv4_model/Mixed_3a_";
 
-        SubGraph i_a;
-        i_a << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true))
-            // TODO (geopin01) : Remove once we understand why a single node graph does not run in CL
-            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+        SubStream i_a(graph);
+        i_a << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(3U, 3U, 96U,
                                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_3x3_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(2, 2, 0, 0))
@@ -195,7 +193,7 @@
     {
         std::string total_path = "/cnn_data/inceptionv4_model/Mixed_4a_";
 
-        SubGraph i_a;
+        SubStream i_a(graph);
         i_a << ConvolutionLayer(1U, 1U, 64U,
                                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -215,7 +213,7 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(1U, 1U, 64U,
                                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -260,7 +258,7 @@
     {
         std::string total_path = "/cnn_data/inceptionv4_model/Mixed_5a_";
 
-        SubGraph i_a;
+        SubStream i_a(graph);
         i_a << ConvolutionLayer(3U, 3U, 192U,
                                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(2, 2, 0, 0))
@@ -271,10 +269,8 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
-        i_b << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true))
-            // TODO (geopin01) : Remove once we understand why a single node graph does not run in CL
-            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+        SubStream i_b(graph);
+        i_b << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true));
 
         return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b));
     }
@@ -283,7 +279,7 @@
     {
         std::string total_path = "/cnn_data/inceptionv4_model/" + param_path + "_";
 
-        SubGraph i_a;
+        SubStream i_a(graph);
         i_a << ConvolutionLayer(1U, 1U, 96U,
                                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -294,7 +290,7 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(1U, 1U, 64U,
                                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -314,7 +310,7 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
+        SubStream i_c(graph);
         i_c << ConvolutionLayer(1U, 1U, 64U,
                                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -343,7 +339,7 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_d;
+        SubStream i_d(graph);
         i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
             << ConvolutionLayer(1U, 1U, 96U,
                                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_weights.npy"),
@@ -362,7 +358,7 @@
     {
         std::string total_path = "/cnn_data/inceptionv4_model/Mixed_6a_";
 
-        SubGraph i_a;
+        SubStream i_a(graph);
         i_a << ConvolutionLayer(3U, 3U, 384U,
                                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(2, 2, 0, 0))
@@ -373,7 +369,7 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(1U, 1U, 192U,
                                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -402,10 +398,9 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
-        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true))
-            // TODO (geopin01) : Remove once we understand why a single node graph does not run in CL
-            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+        SubStream i_c(graph);
+        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true));
+
         return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c));
     }
 
@@ -413,7 +408,7 @@
     {
         std::string total_path = "/cnn_data/inceptionv4_model/" + param_path + "_";
 
-        SubGraph i_a;
+        SubStream i_a(graph);
         i_a << ConvolutionLayer(1U, 1U, 384U,
                                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -424,7 +419,7 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(1U, 1U, 192U,
                                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -453,7 +448,7 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
+        SubStream i_c(graph);
         i_c << ConvolutionLayer(1U, 1U, 192U,
                                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -500,7 +495,7 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_d;
+        SubStream i_d(graph);
         i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
             << ConvolutionLayer(1U, 1U, 128U,
                                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_weights.npy"),
@@ -519,7 +514,7 @@
     {
         std::string total_path = "/cnn_data/inceptionv4_model/Mixed_7a_";
 
-        SubGraph i_a;
+        SubStream i_a(graph);
         i_a << ConvolutionLayer(1U, 1U, 192U,
                                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -539,7 +534,7 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(1U, 1U, 256U,
                                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -577,10 +572,9 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
-        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true))
-            // TODO (geopin01) : Remove once we understand why a single node graph does not run in CL
-            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+        SubStream i_c(graph);
+        i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL), true));
+
         return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c));
     }
 
@@ -588,7 +582,7 @@
     {
         std::string total_path = "/cnn_data/inceptionv4_model/" + param_path + "_";
 
-        SubGraph i_a;
+        SubStream i_a(graph);
         i_a << ConvolutionLayer(1U, 1U, 256U,
                                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
                                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
@@ -599,7 +593,21 @@
                                        0.001f)
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b1;
+        SubStream i_b(graph);
+        i_b << ConvolutionLayer(
+                1U, 1U, 384U,
+                get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
+                std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+                PadStrideInfo(1, 1, 0, 0))
+            << BatchNormalizationLayer(
+                get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"),
+                get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
+                get_random_accessor(1.f, 1.f),
+                get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+        SubStream i_b1(static_cast<IStream &>(i_b));
         i_b1 << ConvolutionLayer(
                  3U, 1U, 256U,
                  get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_weights.npy"),
@@ -613,7 +621,7 @@
                  0.001f)
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b2;
+        SubStream i_b2(static_cast<IStream &>(i_b));
         i_b2 << ConvolutionLayer(
                  1U, 3U, 256U,
                  get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_3x1_weights.npy"),
@@ -627,50 +635,10 @@
                  0.001f)
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
-        i_b << ConvolutionLayer(
-                1U, 1U, 384U,
-                get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
-                std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
-                PadStrideInfo(1, 1, 0, 0))
-            << BatchNormalizationLayer(
-                get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"),
-                get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
-                get_random_accessor(1.f, 1.f),
-                get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f)
-            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
-            << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_b1), std::move(i_b2));
+        // Merge i_b1 and i_b2
+        i_b << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_b1), std::move(i_b2));
 
-        SubGraph i_c1;
-        i_c1 << ConvolutionLayer(
-                 3U, 1U, 256U,
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_weights.npy"),
-                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
-                 PadStrideInfo(1, 1, 1, 0))
-             << BatchNormalizationLayer(
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_BatchNorm_moving_mean.npy"),
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_BatchNorm_moving_variance.npy"),
-                 get_random_accessor(1.f, 1.f),
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_BatchNorm_beta.npy"),
-                 0.001f)
-             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
-
-        SubGraph i_c2;
-        i_c2 << ConvolutionLayer(
-                 1U, 3U, 256U,
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_weights.npy"),
-                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
-                 PadStrideInfo(1, 1, 0, 1))
-             << BatchNormalizationLayer(
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_BatchNorm_moving_mean.npy"),
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_BatchNorm_moving_variance.npy"),
-                 get_random_accessor(1.f, 1.f),
-                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_BatchNorm_beta.npy"),
-                 0.001f)
-             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
-
-        SubGraph i_c;
+        SubStream i_c(graph);
         i_c << ConvolutionLayer(
                 1U, 1U, 384U,
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
@@ -706,10 +674,40 @@
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_beta.npy"),
                 0.001f)
-            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
-            << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_c1), std::move(i_c2));
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_d;
+        SubStream i_c1(static_cast<IStream &>(i_c));
+        i_c1 << ConvolutionLayer(
+                 3U, 1U, 256U,
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_weights.npy"),
+                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+                 PadStrideInfo(1, 1, 1, 0))
+             << BatchNormalizationLayer(
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_BatchNorm_moving_mean.npy"),
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_BatchNorm_moving_variance.npy"),
+                 get_random_accessor(1.f, 1.f),
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_1x3_BatchNorm_beta.npy"),
+                 0.001f)
+             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+        SubStream i_c2(static_cast<IStream &>(i_c));
+        i_c2 << ConvolutionLayer(
+                 1U, 3U, 256U,
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_weights.npy"),
+                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+                 PadStrideInfo(1, 1, 0, 1))
+             << BatchNormalizationLayer(
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_BatchNorm_moving_mean.npy"),
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_BatchNorm_moving_variance.npy"),
+                 get_random_accessor(1.f, 1.f),
+                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_3x1_BatchNorm_beta.npy"),
+                 0.001f)
+             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+        // Merge i_c1 and i_c2
+        i_c << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_c1), std::move(i_c2));
+
+        SubStream i_d(graph);
         i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
             << ConvolutionLayer(1U, 1U, 256U,
                                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_weights.npy"),
diff --git a/examples/graph_lenet.cpp b/examples/graph_lenet.cpp
index 61bc7bd..e4b8eff 100644
--- a/examples/graph_lenet.cpp
+++ b/examples/graph_lenet.cpp
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
+#include "arm_compute/graph2.h"
+
 #include "support/ToolchainSupport.h"
 #include "utils/GraphUtils.h"
 #include "utils/Utils.h"
@@ -30,7 +30,7 @@
 #include <cstdlib>
 
 using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
 using namespace arm_compute::graph_utils;
 
 /** Example demonstrating how to implement LeNet's network using the Compute Library's graph API
@@ -47,8 +47,10 @@
         unsigned int batches = 4; /** Number of batches */
 
         // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
-        const int  int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
-        TargetHint target_hint     = set_target_hint(int_target_hint);
+        const int target                   = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+        Target    target_hint              = set_target_hint2(target);
+        bool      enable_tuning            = (target == 2);
+        bool      enable_memory_management = true;
 
         // Parse arguments
         if(argc < 2)
@@ -78,7 +80,7 @@
 
         //conv1 << pool1 << conv2 << pool2 << fc1 << act1 << fc2 << smx
         graph << target_hint
-              << Tensor(TensorInfo(TensorShape(28U, 28U, 1U, batches), 1, DataType::F32), DummyAccessor())
+              << InputLayer(TensorDescriptor(TensorShape(28U, 28U, 1U, batches), DataType::F32), get_input_accessor(""))
               << ConvolutionLayer(
                   5U, 5U, 20U,
                   get_weights_accessor(data_path, "/cnn_data/lenet_model/conv1_w.npy"),
@@ -101,10 +103,10 @@
                   get_weights_accessor(data_path, "/cnn_data/lenet_model/ip2_w.npy"),
                   get_weights_accessor(data_path, "/cnn_data/lenet_model/ip2_b.npy"))
               << SoftmaxLayer()
-              << Tensor(DummyAccessor(0));
+              << OutputLayer(get_output_accessor(""));
 
-        // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
-        graph.graph_init(int_target_hint == 2);
+        // Finalize graph
+        graph.finalize(target_hint, enable_tuning, enable_memory_management);
     }
     void do_run() override
     {
@@ -113,7 +115,7 @@
     }
 
 private:
-    Graph graph{};
+    Stream graph{ 0, "LeNet" };
 };
 
 /** Main program for LeNet
diff --git a/examples/graph_mobilenet.cpp b/examples/graph_mobilenet.cpp
index 1a930dd..4d01055 100644
--- a/examples/graph_mobilenet.cpp
+++ b/examples/graph_mobilenet.cpp
@@ -21,8 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
+#include "arm_compute/graph2.h"
 #include "support/ToolchainSupport.h"
 #include "utils/GraphUtils.h"
 #include "utils/Utils.h"
@@ -30,7 +29,7 @@
 #include <cstdlib>
 
 using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
 using namespace arm_compute::graph_utils;
 
 /** Example demonstrating how to implement MobileNet's network using the Compute Library's graph API
@@ -51,9 +50,12 @@
         std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
 
         // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
-        const int             int_target_hint  = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
-        TargetHint            target_hint      = set_target_hint(int_target_hint);
-        ConvolutionMethodHint convolution_hint = ConvolutionMethodHint::GEMM;
+        const int                  target                     = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+        Target                     target_hint                = set_target_hint2(target);
+        ConvolutionMethod          convolution_hint           = ConvolutionMethod::GEMM;
+        DepthwiseConvolutionMethod depthwise_convolution_hint = DepthwiseConvolutionMethod::OPTIMIZED_3x3;
+        bool                       enable_tuning              = (target == 2);
+        bool                       enable_memory_management   = true;
 
         // Set model to execute. 0 (MobileNetV1_1.0_224), 1 (MobileNetV1_0.75_160)
         int model_id = (argc > 2) ? std::strtol(argv[2], nullptr, 10) : 0;
@@ -109,8 +111,9 @@
 
         graph << target_hint
               << convolution_hint
-              << Tensor(TensorInfo(TensorShape(spatial_size, spatial_size, 3U, 1U), 1, DataType::F32),
-                        get_input_accessor(image, std::move(preprocessor), false))
+              << depthwise_convolution_hint
+              << InputLayer(TensorDescriptor(TensorShape(spatial_size, spatial_size, 3U, 1U), DataType::F32),
+                            get_input_accessor(image, std::move(preprocessor), false))
               << ConvolutionLayer(
                   3U, 3U, 32U * depth_scale,
                   get_weights_accessor(data_path, "Conv2d_0_weights.npy"),
@@ -121,7 +124,8 @@
                   get_weights_accessor(data_path, "Conv2d_0_BatchNorm_moving_variance.npy"),
                   get_weights_accessor(data_path, "Conv2d_0_BatchNorm_gamma.npy"),
                   get_weights_accessor(data_path, "Conv2d_0_BatchNorm_beta.npy"),
-                  0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
+                  0.001f)
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
               << get_dwsc_node(data_path, "Conv2d_1", 64 * depth_scale, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0))
               << get_dwsc_node(data_path, "Conv2d_2", 128 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0))
               << get_dwsc_node(data_path, "Conv2d_3", 128 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0))
@@ -143,10 +147,10 @@
                   PadStrideInfo(1, 1, 0, 0))
               << ReshapeLayer(TensorShape(1001U))
               << SoftmaxLayer()
-              << Tensor(get_output_accessor(label, 5));
+              << OutputLayer(get_output_accessor(label, 5));
 
-        // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
-        graph.graph_init(int_target_hint == 2);
+        // Finalize graph
+        graph.finalize(target_hint, enable_tuning, enable_memory_management);
     }
     void do_run() override
     {
@@ -155,26 +159,26 @@
     }
 
 private:
-    Graph graph{};
+    Stream graph{ 0, "MobileNetV1" };
 
     BranchLayer get_dwsc_node(const std::string &data_path, std::string &&param_path,
                               unsigned int  conv_filt,
                               PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info)
     {
         std::string total_path = param_path + "_";
-        SubGraph    sg;
+        SubStream   sg(graph);
         sg << DepthwiseConvolutionLayer(
                3U, 3U,
                get_weights_accessor(data_path, total_path + "depthwise_depthwise_weights.npy"),
                std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
-               dwc_pad_stride_info,
-               true)
+               dwc_pad_stride_info)
            << BatchNormalizationLayer(
                get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_moving_mean.npy"),
                get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_moving_variance.npy"),
                get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_gamma.npy"),
                get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_beta.npy"),
-               0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
+               0.001f)
+           << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))
            << ConvolutionLayer(
                1U, 1U, conv_filt,
                get_weights_accessor(data_path, total_path + "pointwise_weights.npy"),
@@ -185,7 +189,8 @@
                get_weights_accessor(data_path, total_path + "pointwise_BatchNorm_moving_variance.npy"),
                get_weights_accessor(data_path, total_path + "pointwise_BatchNorm_gamma.npy"),
                get_weights_accessor(data_path, total_path + "pointwise_BatchNorm_beta.npy"),
-               0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
+               0.001f)
+           << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
 
         return BranchLayer(std::move(sg));
     }
diff --git a/examples/graph_resnet50.cpp b/examples/graph_resnet50.cpp
index e4d31f9..90debb4 100644
--- a/examples/graph_resnet50.cpp
+++ b/examples/graph_resnet50.cpp
@@ -21,8 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
+#include "arm_compute/graph2.h"
 #include "support/ToolchainSupport.h"
 #include "utils/GraphUtils.h"
 #include "utils/Utils.h"
@@ -30,7 +29,7 @@
 #include <cstdlib>
 
 using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
 using namespace arm_compute::graph_utils;
 
 /** Example demonstrating how to implement Microsoft's ResNet50 network using the Compute Library's graph API
@@ -53,8 +52,10 @@
                                                                                                                    false /* Do not convert to BGR */);
 
         // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
-        const int  int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
-        TargetHint target_hint     = set_target_hint(int_target_hint);
+        const int target                   = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+        Target    target_hint              = set_target_hint2(target);
+        bool      enable_tuning            = (target == 2);
+        bool      enable_memory_management = true;
 
         // Parse arguments
         if(argc < 2)
@@ -89,8 +90,8 @@
         }
 
         graph << target_hint
-              << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
-                        get_input_accessor(image, std::move(preprocessor), false /* Do not convert to BGR */))
+              << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
+                            get_input_accessor(image, std::move(preprocessor), false /* Do not convert to BGR */))
               << ConvolutionLayer(
                   7U, 7U, 64U,
                   get_weights_accessor(data_path, "/cnn_data/resnet50_model/conv1_weights.npy"),
@@ -118,11 +119,12 @@
                   PadStrideInfo(1, 1, 0, 0))
               << FlattenLayer()
               << SoftmaxLayer()
-              << Tensor(get_output_accessor(label, 5));
+              << OutputLayer(get_output_accessor(label, 5));
 
-        // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
-        graph.graph_init(int_target_hint == 2);
+        // Finalize graph
+        graph.finalize(target_hint, enable_tuning, enable_memory_management);
     }
+
     void do_run() override
     {
         // Run graph
@@ -130,7 +132,7 @@
     }
 
 private:
-    Graph graph{};
+    Stream graph{ 0, "ResNet50" };
 
     void add_residual_block(const std::string &data_path, const std::string &name, unsigned int base_depth, unsigned int num_units, unsigned int stride)
     {
@@ -147,7 +149,7 @@
                 middle_stride = stride;
             }
 
-            SubGraph right;
+            SubStream right(graph);
             right << ConvolutionLayer(
                       1U, 1U, base_depth,
                       get_weights_accessor(data_path, unit_name + "conv1_weights.npy"),
@@ -188,7 +190,7 @@
 
             if(i == 0)
             {
-                SubGraph left;
+                SubStream left(graph);
                 left << ConvolutionLayer(
                          1U, 1U, base_depth * 4,
                          get_weights_accessor(data_path, unit_name + "shortcut_weights.npy"),
@@ -201,20 +203,19 @@
                          get_weights_accessor(data_path, unit_name + "shortcut_BatchNorm_beta.npy"),
                          0.0000100099996416f);
 
-                graph << ResidualLayer(std::move(left), std::move(right));
+                graph << BranchLayer(BranchMergeMethod::ADD, std::move(left), std::move(right));
             }
             else if(middle_stride > 1)
             {
-                SubGraph left;
-                left << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 1, PadStrideInfo(middle_stride, middle_stride, 0, 0), true))
-                     // TODO (alegil01) : Remove once we understand why a single node graph does not run in CL
-                     << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
+                SubStream left(graph);
+                left << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 1, PadStrideInfo(middle_stride, middle_stride, 0, 0), true));
 
-                graph << ResidualLayer(std::move(left), std::move(right));
+                graph << BranchLayer(BranchMergeMethod::ADD, std::move(left), std::move(right));
             }
             else
             {
-                graph << ResidualLayer(std::move(right));
+                SubStream left(graph);
+                graph << BranchLayer(BranchMergeMethod::ADD, std::move(left), std::move(right));
             }
 
             graph << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
diff --git a/examples/graph_squeezenet.cpp b/examples/graph_squeezenet.cpp
index d0c823a..b4e00a4 100644
--- a/examples/graph_squeezenet.cpp
+++ b/examples/graph_squeezenet.cpp
@@ -21,9 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
-#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph2.h"
 #include "support/ToolchainSupport.h"
 #include "utils/GraphUtils.h"
 #include "utils/Utils.h"
@@ -32,14 +30,10 @@
 #include <tuple>
 
 using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
 using namespace arm_compute::graph_utils;
 using namespace arm_compute::logging;
 
-namespace
-{
-} // namespace
-
 /** Example demonstrating how to implement Squeezenet's network using the Compute Library's graph API
  *
  * @param[in] argc Number of arguments
@@ -59,8 +53,10 @@
         std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
 
         // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
-        const int  int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
-        TargetHint target_hint     = set_target_hint(int_target_hint);
+        const int target                   = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+        Target    target_hint              = set_target_hint2(target);
+        bool      enable_tuning            = (target == 2);
+        bool      enable_memory_management = true;
 
         // Parse arguments
         if(argc < 2)
@@ -95,8 +91,8 @@
         }
 
         graph << target_hint
-              << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
-                        get_input_accessor(image, std::move(preprocessor)))
+              << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
+                            get_input_accessor(image, std::move(preprocessor)))
               << ConvolutionLayer(
                   7U, 7U, 96U,
                   get_weights_accessor(data_path, "/cnn_data/squeezenet_v1.0_model/conv1_w.npy"),
@@ -171,10 +167,10 @@
               << PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
               << FlattenLayer()
               << SoftmaxLayer()
-              << Tensor(get_output_accessor(label, 5));
+              << OutputLayer(get_output_accessor(label, 5));
 
-        // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
-        graph.graph_init(int_target_hint == 2);
+        // Finalize graph
+        graph.finalize(target_hint, enable_tuning, enable_memory_management);
     }
     void do_run() override
     {
@@ -183,12 +179,12 @@
     }
 
 private:
-    Graph graph{};
+    Stream graph{ 0, "SqueezeNetV1" };
 
     BranchLayer get_expand_fire_node(const std::string &data_path, std::string &&param_path, unsigned int expand1_filt, unsigned int expand3_filt)
     {
         std::string total_path = "/cnn_data/squeezenet_v1.0_model/" + param_path + "_";
-        SubGraph    i_a;
+        SubStream   i_a(graph);
         i_a << ConvolutionLayer(
                 1U, 1U, expand1_filt,
                 get_weights_accessor(data_path, total_path + "expand1x1_w.npy"),
@@ -196,7 +192,7 @@
                 PadStrideInfo(1, 1, 0, 0))
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 3U, 3U, expand3_filt,
                 get_weights_accessor(data_path, total_path + "expand3x3_w.npy"),
diff --git a/examples/graph_squeezenet_v1_1.cpp b/examples/graph_squeezenet_v1_1.cpp
index 189cc02..4ebfd3f 100644
--- a/examples/graph_squeezenet_v1_1.cpp
+++ b/examples/graph_squeezenet_v1_1.cpp
@@ -21,9 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
-#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph2.h"
 #include "support/ToolchainSupport.h"
 #include "utils/GraphUtils.h"
 #include "utils/Utils.h"
@@ -32,9 +30,8 @@
 #include <tuple>
 
 using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
 using namespace arm_compute::graph_utils;
-using namespace arm_compute::logging;
 
 namespace
 {
@@ -59,8 +56,10 @@
         std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
 
         // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
-        const int  int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
-        TargetHint target_hint     = set_target_hint(int_target_hint);
+        const int target                   = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+        Target    target_hint              = set_target_hint2(target);
+        bool      enable_tuning            = (target == 2);
+        bool      enable_memory_management = true;
 
         // Parse arguments
         if(argc < 2)
@@ -95,8 +94,8 @@
         }
 
         graph << target_hint
-              << Tensor(TensorInfo(TensorShape(227U, 227U, 3U, 1U), 1, DataType::F32),
-                        get_input_accessor(image, std::move(preprocessor)))
+              << InputLayer(TensorDescriptor(TensorShape(227U, 227U, 3U, 1U), DataType::F32),
+                            get_input_accessor(image, std::move(preprocessor)))
               << ConvolutionLayer(
                   3U, 3U, 64U,
                   get_weights_accessor(data_path, "/cnn_data/squeezenet_v1_1_model/conv1_w.npy"),
@@ -171,10 +170,10 @@
               << PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
               << FlattenLayer()
               << SoftmaxLayer()
-              << Tensor(get_output_accessor(label, 5));
+              << OutputLayer(get_output_accessor(label, 5));
 
-        // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
-        graph.graph_init(int_target_hint == 2);
+        // Finalize graph
+        graph.finalize(target_hint, enable_tuning, enable_memory_management);
     }
     void do_run() override
     {
@@ -183,12 +182,12 @@
     }
 
 private:
-    Graph graph{};
+    Stream graph{ 0, "SqueezeNetV1.1" };
 
     BranchLayer get_expand_fire_node(const std::string &data_path, std::string &&param_path, unsigned int expand1_filt, unsigned int expand3_filt)
     {
         std::string total_path = "/cnn_data/squeezenet_v1_1_model/" + param_path + "_";
-        SubGraph    i_a;
+        SubStream   i_a(graph);
         i_a << ConvolutionLayer(
                 1U, 1U, expand1_filt,
                 get_weights_accessor(data_path, total_path + "expand1x1_w.npy"),
@@ -196,7 +195,7 @@
                 PadStrideInfo(1, 1, 0, 0))
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 3U, 3U, expand3_filt,
                 get_weights_accessor(data_path, total_path + "expand3x3_w.npy"),
diff --git a/examples/graph_vgg16.cpp b/examples/graph_vgg16.cpp
index c8cc5b2..faaf579 100644
--- a/examples/graph_vgg16.cpp
+++ b/examples/graph_vgg16.cpp
@@ -21,8 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
+#include "arm_compute/graph2.h"
 #include "support/ToolchainSupport.h"
 #include "utils/GraphUtils.h"
 #include "utils/Utils.h"
@@ -30,7 +29,7 @@
 #include <cstdlib>
 
 using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
 using namespace arm_compute::graph_utils;
 
 namespace
@@ -41,9 +40,9 @@
  *
  * @return The convolution layer hint
  */
-ConvolutionMethodHint convolution_hint_vgg16(size_t size_in_bytes)
+ConvolutionMethod convolution_hint_vgg16(size_t size_in_bytes)
 {
-    return ((get_mem_free_from_meminfo() * 1024) >= size_in_bytes) ? ConvolutionMethodHint::GEMM : ConvolutionMethodHint::DIRECT;
+    return ((get_mem_free_from_meminfo() * 1024) >= size_in_bytes) ? ConvolutionMethod::GEMM : ConvolutionMethod::DIRECT;
 }
 } // namespace
 
@@ -66,12 +65,14 @@
         std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
 
         // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
-        const int  int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
-        TargetHint target_hint     = set_target_hint(int_target_hint);
+        const int target                   = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+        Target    target_hint              = set_target_hint2(target);
+        bool      enable_tuning            = (target == 2);
+        bool      enable_memory_management = true;
 
         // Check if we can use GEMM-based convolutions evaluating if the platform has at least 1.8 GB of available memory
-        const size_t          memory_required  = 1932735283L;
-        ConvolutionMethodHint convolution_hint = convolution_hint_vgg16(memory_required);
+        const size_t      memory_required  = 1932735283L;
+        ConvolutionMethod convolution_hint = convolution_hint_vgg16(memory_required);
 
         // Parse arguments
         if(argc < 2)
@@ -107,8 +108,8 @@
 
         graph << target_hint
               << convolution_hint
-              << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
-                        get_input_accessor(image, std::move(preprocessor)))
+              << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
+                            get_input_accessor(image, std::move(preprocessor)))
               // Layer 1
               << ConvolutionLayer(
                   3U, 3U, 64U,
@@ -224,10 +225,10 @@
                   get_weights_accessor(data_path, "/cnn_data/vgg16_model/fc8_b.npy"))
               // Softmax
               << SoftmaxLayer()
-              << Tensor(get_output_accessor(label, 5));
+              << OutputLayer(get_output_accessor(label, 5));
 
-        // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
-        graph.graph_init(int_target_hint == 2);
+        // Finalize graph
+        graph.finalize(target_hint, enable_tuning, enable_memory_management);
     }
     void do_run() override
     {
@@ -236,7 +237,7 @@
     }
 
 private:
-    Graph graph{};
+    Stream graph{ 0, "VGG16" };
 };
 
 /** Main program for VGG16
diff --git a/examples/graph_vgg19.cpp b/examples/graph_vgg19.cpp
index 69ae23d..55502e0 100644
--- a/examples/graph_vgg19.cpp
+++ b/examples/graph_vgg19.cpp
@@ -21,8 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
+#include "arm_compute/graph2.h"
 #include "support/ToolchainSupport.h"
 #include "utils/GraphUtils.h"
 #include "utils/Utils.h"
@@ -30,7 +29,7 @@
 #include <cstdlib>
 
 using namespace arm_compute::utils;
-using namespace arm_compute::graph;
+using namespace arm_compute::graph2::frontend;
 using namespace arm_compute::graph_utils;
 
 /** Example demonstrating how to implement VGG19's network using the Compute Library's graph API
@@ -52,9 +51,11 @@
         std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
 
         // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
-        const int             int_target_hint  = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
-        TargetHint            target_hint      = set_target_hint(int_target_hint);
-        ConvolutionMethodHint convolution_hint = ConvolutionMethodHint::DIRECT;
+        const int         target                   = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+        Target            target_hint              = set_target_hint2(target);
+        ConvolutionMethod convolution_hint         = ConvolutionMethod::DIRECT;
+        bool              enable_tuning            = (target == 2);
+        bool              enable_memory_management = true;
 
         // Parse arguments
         if(argc < 2)
@@ -90,8 +91,8 @@
 
         graph << target_hint
               << convolution_hint
-              << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
-                        get_input_accessor(image, std::move(preprocessor)))
+              << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
+                            get_input_accessor(image, std::move(preprocessor)))
               // Layer 1
               << ConvolutionLayer(
                   3U, 3U, 64U,
@@ -217,10 +218,10 @@
                   get_weights_accessor(data_path, "/cnn_data/vgg19_model/fc8_b.npy"))
               // Softmax
               << SoftmaxLayer()
-              << Tensor(get_output_accessor(label, 5));
+              << OutputLayer(get_output_accessor(label, 5));
 
-        // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
-        graph.graph_init(int_target_hint == 2);
+        // Finalize graph
+        graph.finalize(target_hint, enable_tuning, enable_memory_management);
     }
     void do_run() override
     {
@@ -229,7 +230,7 @@
     }
 
 private:
-    Graph graph{};
+    Stream graph{ 0, "VGG19" };
 };
 
 /** Main program for VGG19
diff --git a/src/core/NEON/kernels/NEIm2ColKernel.cpp b/src/core/NEON/kernels/NEIm2ColKernel.cpp
index 4fa329b..dee1608 100644
--- a/src/core/NEON/kernels/NEIm2ColKernel.cpp
+++ b/src/core/NEON/kernels/NEIm2ColKernel.cpp
@@ -319,8 +319,7 @@
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
 
     // Perform validation step
-    ARM_COMPUTE_UNUSED(is_fully_connected);
-    ARM_COMPUTE_UNUSED(is_flatten);
+    ARM_COMPUTE_UNUSED(is_fully_connected, is_flatten);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, is_fully_connected, is_flatten));
 
     _input          = input;
diff --git a/src/core/NEON/kernels/NEMinMaxLocationKernel.cpp b/src/core/NEON/kernels/NEMinMaxLocationKernel.cpp
index ad66acd..b90e813 100644
--- a/src/core/NEON/kernels/NEMinMaxLocationKernel.cpp
+++ b/src/core/NEON/kernels/NEMinMaxLocationKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -32,7 +32,7 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
-#include "arm_compute/core/utils/misc/utility.h"
+#include "arm_compute/core/utils/misc/Utility.h"
 
 #include <algorithm>
 #include <arm_neon.h>
diff --git a/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp b/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
index 13d87a0..d91efd2 100644
--- a/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
+++ b/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
@@ -33,7 +33,7 @@
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
-#include "arm_compute/core/utils/misc/utility.h"
+#include "arm_compute/core/utils/misc/Utility.h"
 
 #include <algorithm>
 #include <arm_neon.h>
diff --git a/src/graph2/Graph.cpp b/src/graph2/Graph.cpp
new file mode 100644
index 0000000..ead67bc
--- /dev/null
+++ b/src/graph2/Graph.cpp
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/Graph.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+Graph::Graph(GraphID id, std::string name)
+    : _id(id), _name(std::move(name)), _nodes(), _edges(), _tensors(), _tagged_nodes(), _mtx()
+{
+}
+
+bool Graph::remove_node(NodeID nid)
+{
+    if(nid >= _nodes.size())
+    {
+        return false;
+    }
+
+    std::unique_ptr<INode> &node = _nodes[nid];
+
+    // Remove node connections
+    if(node)
+    {
+        for(auto &input_eid : node->_input_edges)
+        {
+            remove_connection(input_eid);
+        }
+        for(auto &outpud_eid : node->_output_edges)
+        {
+            remove_connection(outpud_eid);
+        }
+    }
+
+    node = nullptr;
+
+    return true;
+}
+
+EdgeID Graph::add_connection(NodeID source, size_t source_idx, NodeID sink, size_t sink_idx)
+{
+    std::lock_guard<arm_compute::Mutex> lock(_mtx);
+
+    // Check if node index is valid, if node exists and finally if the connection index is valid
+    ARM_COMPUTE_ERROR_ON((source >= _nodes.size()) || (_nodes[source] == nullptr) || (source_idx >= _nodes[source]->num_outputs()));
+    ARM_COMPUTE_ERROR_ON((sink >= _nodes.size()) || (_nodes[sink] == nullptr) || (sink_idx >= _nodes[sink]->num_inputs()));
+
+    // Get nodes
+    std::unique_ptr<INode> &source_node = _nodes[source];
+    std::unique_ptr<INode> &sink_node   = _nodes[sink];
+
+    // Check for duplicate connections (Check only sink node)
+    Edge *sink_node_edge = sink_node->input_edge(sink_idx);
+    if((sink_node_edge != nullptr) && (sink_node_edge->producer_id() == source) && (sink_node_edge->producer_idx() == source_idx)
+       && (sink_node_edge->consumer_id() == sink) && (sink_node_edge->consumer_idx() == sink_idx))
+    {
+        return sink_node_edge->id();
+    }
+
+    // Check if there is already a tensor associated with output if not create one
+    TensorID tid = source_node->output_id(source_idx);
+    if(tid == NullTensorID)
+    {
+        tid = create_tensor();
+    }
+    std::unique_ptr<Tensor> &tensor = _tensors[tid];
+
+    // Create connections
+    EdgeID eid        = _edges.size();
+    auto   connection = arm_compute::support::cpp14::make_unique<Edge>(eid, source_node.get(), source_idx, sink_node.get(), sink_idx, tensor.get());
+    _edges.push_back(std::move(connection));
+
+    // Add connections to source and sink nodes
+    source_node->_output_edges.insert(eid);
+    sink_node->_input_edges[sink_idx] = eid;
+
+    // Set tensor output node
+    source_node->_outputs[source_idx] = tid;
+
+    // Bind tensor to the edge
+    tensor->bind_edge(eid);
+
+    // Try and propagate shapes in sink node
+    sink_node->forward_descriptors();
+
+    return eid;
+}
+
+bool Graph::remove_connection(EdgeID eid)
+{
+    if(eid >= _edges.size())
+    {
+        return false;
+    }
+
+    std::unique_ptr<Edge> &edge = _edges[eid];
+
+    // Remove node connections
+    if(edge != nullptr)
+    {
+        // Get tensor bound to the edge
+        if(edge->tensor() != nullptr)
+        {
+            edge->tensor()->unbind_edge(eid);
+        }
+
+        // Remove edges from source node
+        if(edge->producer() != nullptr)
+        {
+            edge->producer()->_output_edges.erase(eid);
+        }
+
+        // Remove edges from sink node
+        if((edge->consumer() != nullptr) && (edge->consumer_idx() < edge->consumer()->_input_edges.size()))
+        {
+            edge->consumer()->_input_edges[edge->consumer_idx()] = EmptyEdgeID;
+        }
+    }
+
+    // Clear edge
+    edge = nullptr;
+
+    return true;
+}
+
+TensorID Graph::create_tensor(TensorDescriptor desc)
+{
+    TensorID tid    = _tensors.size();
+    auto     tensor = support::cpp14::make_unique<Tensor>(tid, desc);
+    _tensors.push_back(std::move(tensor));
+
+    return tid;
+}
+
+std::string Graph::name() const
+{
+    return _name;
+}
+
+GraphID Graph::id() const
+{
+    return _id;
+}
+
+const std::vector<NodeID> &Graph::inputs()
+{
+    return _tagged_nodes[NodeType::Input];
+}
+
+std::vector<std::unique_ptr<INode>> &Graph::nodes()
+{
+    return _nodes;
+}
+
+const std::vector<std::unique_ptr<INode>> &Graph::nodes() const
+{
+    return _nodes;
+}
+
+const std::vector<std::unique_ptr<Edge>> &Graph::edges() const
+{
+    return _edges;
+}
+
+std::vector<std::unique_ptr<Tensor>> &Graph::tensors()
+{
+    return _tensors;
+}
+
+const std::vector<std::unique_ptr<Tensor>> &Graph::tensors() const
+{
+    return _tensors;
+}
+
+const INode *Graph::node(NodeID id) const
+{
+    return (id >= _nodes.size()) ? nullptr : _nodes[id].get();
+}
+
+INode *Graph::node(NodeID id)
+{
+    return (id >= _nodes.size()) ? nullptr : _nodes[id].get();
+}
+
+const Edge *Graph::edge(EdgeID id) const
+{
+    return (id >= _edges.size()) ? nullptr : _edges[id].get();
+}
+
+Edge *Graph::edge(EdgeID id)
+{
+    return (id >= _edges.size()) ? nullptr : _edges[id].get();
+}
+
+const Tensor *Graph::tensor(TensorID id) const
+{
+    return (id >= _tensors.size()) ? nullptr : _tensors[id].get();
+}
+
+Tensor *Graph::tensor(TensorID id)
+{
+    return (id >= _tensors.size()) ? nullptr : _tensors[id].get();
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/GraphBuilder.cpp b/src/graph2/GraphBuilder.cpp
new file mode 100644
index 0000000..aaf70c4
--- /dev/null
+++ b/src/graph2/GraphBuilder.cpp
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/GraphBuilder.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/Utils.h"
+#include "arm_compute/graph2/algorithms/BFS.h"
+#include "arm_compute/graph2/nodes/Nodes.h"
+
+#define CHECK_NODEIDX_PAIR(pair, g) \
+    ARM_COMPUTE_ERROR_ON(((pair).node_id >= (g).nodes().size()) || ((g).node((pair).node_id) == nullptr) || ((pair).index >= (g).node((pair).node_id)->num_outputs()));
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace
+{
+Status set_node_params(Graph &g, NodeID nid, NodeParams &params)
+{
+    INode *node = g.node(nid);
+    ARM_COMPUTE_RETURN_ERROR_ON(!node);
+
+    node->set_common_node_parameters(params);
+
+    return Status{};
+}
+Status set_accessor_on_node(Graph &g, NodeID nid, bool is_output, size_t idx, ITensorAccessorUPtr accessor)
+{
+    INode *node = g.node(nid);
+    ARM_COMPUTE_RETURN_ERROR_ON(!node);
+
+    Tensor *tensor = is_output ? node->output(idx) : node->input(idx);
+    ARM_COMPUTE_RETURN_ERROR_ON(!tensor);
+
+    tensor->set_accessor(std::move(accessor));
+
+    return Status{};
+}
+
+NodeID add_const_node_with_name(Graph &g, NodeParams params, const std::string &name, TensorDescriptor desc, ITensorAccessorUPtr accessor)
+{
+    params.name = params.name.empty() ? "" : params.name + name;
+    auto nid    = GraphBuilder::add_const_node(g, params, desc, std::move(accessor));
+    set_node_params(g, nid, params);
+    return nid;
+}
+} // namespace
+
+NodeID GraphBuilder::add_const_node(Graph &g, NodeParams params, TensorDescriptor desc, ITensorAccessorUPtr accessor)
+{
+    auto nid = g.add_node<ConstNode>(desc);
+    set_node_params(g, nid, params);
+    set_accessor_on_node(g, nid, true, 0, std::move(accessor));
+    return nid;
+}
+
+NodeID GraphBuilder::add_input_node(Graph &g, NodeParams params, TensorDescriptor desc, ITensorAccessorUPtr accessor)
+{
+    auto nid = g.add_node<InputNode>(desc);
+    set_node_params(g, nid, params);
+    set_accessor_on_node(g, nid, true, 0, std::move(accessor));
+    return nid;
+}
+
+NodeID GraphBuilder::add_output_node(Graph &g, NodeParams params, NodeIdxPair input, ITensorAccessorUPtr accessor)
+{
+    CHECK_NODEIDX_PAIR(input, g);
+
+    NodeID nid = g.add_node<OutputNode>();
+    g.add_connection(input.node_id, input.index, nid, 0);
+    set_node_params(g, nid, params);
+    set_accessor_on_node(g, nid, false, 0, std::move(accessor));
+
+    return nid;
+}
+
+NodeID GraphBuilder::add_activation_node(Graph &g, NodeParams params, NodeIdxPair input, ActivationLayerInfo act_info)
+{
+    CHECK_NODEIDX_PAIR(input, g);
+
+    NodeID nid = g.add_node<ActivationLayerNode>(act_info);
+    g.add_connection(input.node_id, input.index, nid, 0);
+    set_node_params(g, nid, params);
+
+    return nid;
+}
+
+NodeID GraphBuilder::add_batch_normalization_node(Graph &g, NodeParams params, NodeIdxPair input, float epsilon,
+                                                  ITensorAccessorUPtr mean_accessor, ITensorAccessorUPtr var_accessor,
+                                                  ITensorAccessorUPtr beta_accessor, ITensorAccessorUPtr gamma_accessor)
+{
+    CHECK_NODEIDX_PAIR(input, g);
+
+    bool has_beta  = (beta_accessor != nullptr);
+    bool has_gamma = (gamma_accessor != nullptr);
+
+    // Get input tensor descriptor
+    const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]);
+
+    // Calculate Common Descriptor
+    TensorDescriptor common_desc = input_tensor_desc;
+    common_desc.shape            = TensorShape(common_desc.shape.z());
+
+    // Create mean and variance nodes
+    auto mean_nid = add_const_node_with_name(g, params, "Mean", common_desc, std::move(mean_accessor));
+    auto var_nid  = add_const_node_with_name(g, params, "Variance", common_desc, std::move(var_accessor));
+
+    // Create beta node
+    NodeID beta_nid = EmptyNodeID;
+    if(has_beta)
+    {
+        beta_nid = add_const_node_with_name(g, params, "Beta", common_desc, std::move(beta_accessor));
+    }
+
+    // Create gamma node
+    NodeID gamma_nid = EmptyNodeID;
+    if(has_gamma)
+    {
+        gamma_nid = add_const_node_with_name(g, params, "Gamma", common_desc, std::move(gamma_accessor));
+    }
+
+    // Create batch normalization node and add connections
+    NodeID batch_norm_nid = g.add_node<BatchNormalizationLayerNode>(epsilon);
+    g.add_connection(input.node_id, input.index, batch_norm_nid, 0);
+    g.add_connection(mean_nid, 0, batch_norm_nid, 1);
+    g.add_connection(var_nid, 0, batch_norm_nid, 2);
+    if(has_beta)
+    {
+        g.add_connection(beta_nid, 0, batch_norm_nid, 3);
+    }
+    if(has_gamma)
+    {
+        g.add_connection(gamma_nid, 0, batch_norm_nid, 4);
+    }
+    set_node_params(g, batch_norm_nid, params);
+
+    return batch_norm_nid;
+}
+
+NodeID GraphBuilder::add_convolution_node(Graph &g, NodeParams params, NodeIdxPair input,
+                                          Size2D kernel_spatial_extend, unsigned int depth, PadStrideInfo conv_info,
+                                          ConvolutionMethod   method,
+                                          ITensorAccessorUPtr weights_accessor, ITensorAccessorUPtr bias_accessor)
+{
+    CHECK_NODEIDX_PAIR(input, g);
+    ARM_COMPUTE_ERROR_ON(depth == 0);
+    ARM_COMPUTE_ERROR_ON((kernel_spatial_extend.width == 0) || (kernel_spatial_extend.height == 0));
+
+    bool has_bias = (bias_accessor != nullptr);
+
+    // Get input tensor descriptor
+    const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]);
+
+    // Create weights node
+    TensorDescriptor w_desc = input_tensor_desc;
+    w_desc.shape            = TensorShape(kernel_spatial_extend.width, kernel_spatial_extend.height, w_desc.shape.z(), depth);
+    NodeID w_nid            = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
+
+    // Create bias node
+    NodeID b_nid = EmptyNodeID;
+    if(has_bias)
+    {
+        TensorDescriptor b_desc = input_tensor_desc;
+        b_desc.shape            = TensorShape(depth);
+        b_nid                   = add_const_node_with_name(g, params, "Bias", b_desc, std::move(bias_accessor));
+    }
+
+    // Create convolution node and connect
+    NodeID conv_nid = g.add_node<ConvolutionLayerNode>(conv_info, method);
+    g.add_connection(input.node_id, input.index, conv_nid, 0);
+    g.add_connection(w_nid, 0, conv_nid, 1);
+    if(has_bias)
+    {
+        g.add_connection(b_nid, 0, conv_nid, 2);
+    }
+    set_node_params(g, conv_nid, params);
+
+    return conv_nid;
+}
+
+NodeID GraphBuilder::add_depth_concatenate_node(Graph &g, NodeParams params, std::vector<NodeIdxPair> inputs)
+{
+    ARM_COMPUTE_ERROR_ON(inputs.size() == 0);
+
+    NodeID nid = g.add_node<DepthConcatenateLayerNode>(inputs.size());
+
+    unsigned int i = 0;
+    for(const auto &input : inputs)
+    {
+        CHECK_NODEIDX_PAIR(input, g);
+        g.add_connection(input.node_id, input.index, nid, i++);
+    }
+    set_node_params(g, nid, params);
+
+    return nid;
+}
+
+NodeID GraphBuilder::add_depthwise_convolution_node(Graph &g, NodeParams params, NodeIdxPair input, Size2D kernel_spatial_extend, PadStrideInfo conv_info,
+                                                    DepthwiseConvolutionMethod method,
+                                                    ITensorAccessorUPtr weights_accessor, ITensorAccessorUPtr bias_accessor)
+{
+    CHECK_NODEIDX_PAIR(input, g);
+    ARM_COMPUTE_ERROR_ON((kernel_spatial_extend.width == 0) || (kernel_spatial_extend.height == 0));
+
+    bool has_bias = (bias_accessor != nullptr);
+
+    // Get input tensor descriptor
+    const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]);
+
+    // Create weights node
+    TensorDescriptor w_desc = input_tensor_desc;
+    w_desc.shape            = TensorShape(kernel_spatial_extend.width, kernel_spatial_extend.height, w_desc.shape.z());
+    NodeID w_nid            = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
+
+    // Create bias node
+    NodeID b_nid = EmptyNodeID;
+    if(has_bias)
+    {
+        TensorDescriptor b_desc = input_tensor_desc;
+        b_desc.shape            = TensorShape(b_desc.shape.z());
+        b_nid                   = add_const_node_with_name(g, params, "Bias", b_desc, std::move(bias_accessor));
+    }
+
+    // Create convolution node and connect
+    NodeID conv_nid = g.add_node<DepthwiseConvolutionLayerNode>(conv_info, method);
+    g.add_connection(input.node_id, input.index, conv_nid, 0);
+    g.add_connection(w_nid, 0, conv_nid, 1);
+    if(has_bias)
+    {
+        g.add_connection(b_nid, 0, conv_nid, 2);
+    }
+    set_node_params(g, conv_nid, params);
+
+    return conv_nid;
+}
+
+NodeID GraphBuilder::add_elementwise_node(Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, EltwiseOperation operation)
+{
+    CHECK_NODEIDX_PAIR(input0, g);
+    CHECK_NODEIDX_PAIR(input1, g);
+
+    NodeID nid = g.add_node<EltwiseLayerNode>(operation);
+
+    g.add_connection(input0.node_id, input0.index, nid, 0);
+    g.add_connection(input1.node_id, input1.index, nid, 1);
+
+    set_node_params(g, nid, params);
+
+    return nid;
+}
+
+NodeID GraphBuilder::add_flatten_node(Graph &g, NodeParams params, NodeIdxPair input)
+{
+    CHECK_NODEIDX_PAIR(input, g);
+
+    NodeID nid = g.add_node<FlattenLayerNode>();
+    g.add_connection(input.node_id, input.index, nid, 0);
+
+    set_node_params(g, nid, params);
+
+    return nid;
+}
+
+NodeID GraphBuilder::add_fully_connected_layer(Graph &g, NodeParams params, NodeIdxPair input, unsigned int num_outputs,
+                                               ITensorAccessorUPtr weights_accessor, ITensorAccessorUPtr bias_accessor)
+{
+    CHECK_NODEIDX_PAIR(input, g);
+    ARM_COMPUTE_ERROR_ON(num_outputs == 0);
+
+    bool has_bias = (bias_accessor != nullptr);
+
+    // Get input tensor descriptor
+    const TensorDescriptor input_tensor_desc = get_tensor_descriptor(g, g.node(input.node_id)->outputs()[0]);
+
+    // Create weights node
+    TensorDescriptor w_desc = input_tensor_desc;
+    w_desc.shape            = FullyConnectedLayerNode::compute_weights_shape(input_tensor_desc.shape, num_outputs);
+    NodeID w_nid            = add_const_node_with_name(g, params, "Weights", w_desc, std::move(weights_accessor));
+
+    // Create bias node
+    NodeID b_nid = EmptyNodeID;
+    if(has_bias)
+    {
+        TensorDescriptor b_desc = input_tensor_desc;
+        b_desc.shape            = TensorShape(num_outputs);
+        b_nid                   = add_const_node_with_name(g, params, "Bias", b_desc, std::move(bias_accessor));
+    }
+
+    // Create fully connected node and connect
+    NodeID fc_nid = g.add_node<FullyConnectedLayerNode>(num_outputs);
+    g.add_connection(input.node_id, input.index, fc_nid, 0);
+    g.add_connection(w_nid, 0, fc_nid, 1);
+    if(has_bias)
+    {
+        g.add_connection(b_nid, 0, fc_nid, 2);
+    }
+
+    set_node_params(g, fc_nid, params);
+
+    return fc_nid;
+}
+
+NodeID GraphBuilder::add_normalization_node(Graph &g, NodeParams params, NodeIdxPair input, NormalizationLayerInfo norm_info)
+{
+    CHECK_NODEIDX_PAIR(input, g);
+
+    NodeID nid = g.add_node<NormalizationLayerNode>(norm_info);
+    g.add_connection(input.node_id, input.index, nid, 0);
+
+    set_node_params(g, nid, params);
+
+    return nid;
+}
+
+NodeID GraphBuilder::add_pooling_node(Graph &g, NodeParams params, NodeIdxPair input, PoolingLayerInfo pool_info)
+{
+    CHECK_NODEIDX_PAIR(input, g);
+
+    NodeID nid = g.add_node<PoolingLayerNode>(pool_info);
+    g.add_connection(input.node_id, input.index, nid, 0);
+
+    set_node_params(g, nid, params);
+
+    return nid;
+}
+
+NodeID GraphBuilder::add_reshape_node(Graph &g, NodeParams params, NodeIdxPair input, TensorShape shape)
+{
+    CHECK_NODEIDX_PAIR(input, g);
+
+    NodeID nid = g.add_node<ReshapeLayerNode>(shape);
+    g.add_connection(input.node_id, input.index, nid, 0);
+
+    set_node_params(g, nid, params);
+
+    return nid;
+}
+
+NodeID GraphBuilder::add_softmax_node(Graph &g, NodeParams params, NodeIdxPair input, float beta)
+{
+    CHECK_NODEIDX_PAIR(input, g);
+
+    NodeID nid = g.add_node<SoftmaxLayerNode>(beta);
+    g.add_connection(input.node_id, input.index, nid, 0);
+
+    set_node_params(g, nid, params);
+
+    return nid;
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/GraphContext.cpp b/src/graph2/GraphContext.cpp
new file mode 100644
index 0000000..88fc521
--- /dev/null
+++ b/src/graph2/GraphContext.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/GraphContext.h"
+#include <arm_compute/graph2.h>
+
+namespace arm_compute
+{
+namespace graph2
+{
+GraphContext::GraphContext()
+    : _tunable(false), _memory_managed(false), _memory_managers()
+{
+}
+
+void GraphContext::enable_tuning(bool enable_tuning)
+{
+    _tunable = enable_tuning;
+}
+
+bool GraphContext::is_tuning_enabled() const
+{
+    return _tunable;
+}
+
+void GraphContext::enable_memory_managenent(bool enable_mm)
+{
+    _memory_managed = enable_mm;
+}
+
+bool GraphContext::is_memory_management_enabled()
+{
+    return _memory_managed;
+}
+
+bool GraphContext::insert_memory_management_ctx(MemoryManagerContext &&memory_ctx)
+{
+    Target target = memory_ctx.target;
+    if(target == Target::UNSPECIFIED || _memory_managers.find(target) != std::end(_memory_managers))
+    {
+        return false;
+    }
+
+    _memory_managers[target] = std::move(memory_ctx);
+    return true;
+}
+
+MemoryManagerContext *GraphContext::memory_management_ctx(Target target)
+{
+    return (_memory_managers.find(target) != std::end(_memory_managers)) ? &_memory_managers[target] : nullptr;
+}
+
+void GraphContext::finalize()
+{
+    for(auto &mm_obj : _memory_managers)
+    {
+        if(mm_obj.second.mm != nullptr)
+        {
+            mm_obj.second.mm->finalize();
+        }
+    }
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/GraphManager.cpp b/src/graph2/GraphManager.cpp
new file mode 100644
index 0000000..edbe2cc
--- /dev/null
+++ b/src/graph2/GraphManager.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/GraphManager.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/PassManager.h"
+#include "arm_compute/graph2/Utils.h"
+#include "arm_compute/graph2/detail/ExecutionHelpers.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+GraphManager::GraphManager()
+    : _workloads()
+{
+    detail::default_initialize_backends();
+}
+
+void GraphManager::finalize_graph(Graph &graph, GraphContext &ctx, PassManager &pm, Target target)
+{
+    // Setup graph context if not done manually
+    setup_default_graph_context(ctx);
+
+    // Check if graph has been registered
+    ARM_COMPUTE_ERROR_ON_MSG(_workloads.find(graph.id()) != std::end(_workloads), "Graph is already registered!");
+
+    // Force target to all graph constructs
+    // TODO (geopin01) : Support heterogeneous execution
+    Target forced_target = is_target_supported(target) ? target : get_default_target();
+    force_target_to_graph(graph, forced_target);
+
+    // Configure all tensors
+    detail::configure_all_tensors(graph);
+
+    // Apply all mutating passes
+    pm.run_all(graph);
+
+    // TODO (geopin01): Perform a graph validation
+
+    // Perform topological sort
+    // FIXME : Sort nodes and pass sorted indices in configure all nodes
+
+    // Configure all nodes
+    auto workload = detail::configure_all_nodes(graph, ctx);
+    ARM_COMPUTE_ERROR_ON_MSG(workload.tasks.empty(), "Could not configure all nodes!");
+
+    // Allocate all tensors
+    detail::allocate_all_tensors(graph);
+
+    // Call accessors on all Const nodes
+    detail::call_all_const_node_accessors(graph);
+
+    _workloads.insert(std::make_pair(graph.id(), std::move(workload)));
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Created workload for graph with ID : " << graph.id().get() << std::endl);
+
+    // Finalize Graph context
+    ctx.finalize();
+}
+
+void GraphManager::execute_graph(Graph &graph)
+{
+    // Check if graph is finalized
+    auto it = _workloads.find(graph.id());
+    ARM_COMPUTE_ERROR_ON_MSG(it == std::end(_workloads), "Graph is not registered!");
+
+    // Call input accessors
+    detail::call_all_input_node_accessors(it->second);
+
+    // Run graph
+    detail::call_all_tasks(it->second);
+
+    // Call output accessors
+    detail::call_all_output_node_accessors(it->second);
+}
+
+void GraphManager::invalidate_graph(Graph &graph)
+{
+    auto it = _workloads.find(graph.id());
+    ARM_COMPUTE_ERROR_ON_MSG(it == std::end(_workloads), "Graph is not registered!");
+
+    _workloads.erase(it);
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/INode.cpp b/src/graph2/INode.cpp
new file mode 100644
index 0000000..28be341
--- /dev/null
+++ b/src/graph2/INode.cpp
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/INode.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/graph2/Edge.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/Tensor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// *INDENT-OFF*
+// clang-format off
+INode::INode()
+    : _graph(nullptr), _id(EmptyNodeID), _common_params({ "", Target::UNSPECIFIED}),
+      _outputs(), _input_edges(), _output_edges(), _assigned_target(Target::UNSPECIFIED)
+{
+}
+// clang-format on
+// *INDENT-ON*
+
+void INode::set_graph(Graph *g)
+{
+    ARM_COMPUTE_ERROR_ON(g == nullptr);
+    _graph = g;
+}
+
+void INode::set_id(NodeID id)
+{
+    _id = id;
+}
+
+void INode::set_common_node_parameters(NodeParams common_params)
+{
+    _common_params = std::move(common_params);
+}
+
+void INode::set_requested_target(Target target)
+{
+    _common_params.target = target;
+}
+
+void INode::set_assigned_target(Target target)
+{
+    _assigned_target = target;
+}
+
+void INode::set_output_tensor(TensorID tid, size_t idx)
+{
+    if(tid != NullTensorID && (idx < _outputs.size()) && (_graph->tensor(tid) != nullptr))
+    {
+        ARM_COMPUTE_ERROR_ON(_graph == nullptr);
+        Tensor *updated_tensor = _graph->tensor(tid);
+        _outputs[idx]          = tid;
+
+        // Set tensor to all output edges of the node
+        for(auto &output_edge_id : _output_edges)
+        {
+            auto output_edge = _graph->edge(output_edge_id);
+            if(output_edge != nullptr)
+            {
+                // Unbind edge from current tensor
+                auto current_output_tensor = output_edge->tensor();
+                current_output_tensor->unbind_edge(output_edge->id());
+
+                // Update tensor to edge and rebind tensor
+                output_edge->update_bound_tensor(updated_tensor);
+                updated_tensor->bind_edge(output_edge->id());
+            }
+        }
+    }
+}
+
+NodeID INode::id() const
+{
+    return _id;
+}
+
+std::string INode::name() const
+{
+    return _common_params.name;
+}
+
+const Graph *INode::graph() const
+{
+    return _graph;
+}
+
+Graph *INode::graph()
+{
+    return _graph;
+}
+
+const std::vector<TensorID> &INode::outputs() const
+{
+    return _outputs;
+}
+
+const std::vector<EdgeID> &INode::input_edges() const
+{
+    return _input_edges;
+}
+
+const std::set<EdgeID> &INode::output_edges() const
+{
+    return _output_edges;
+}
+
+TensorID INode::input_id(size_t idx) const
+{
+    ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
+    Edge *e = _graph->edge(_input_edges[idx]);
+    return (e != nullptr) ? e->tensor_id() : NullTensorID;
+}
+
+TensorID INode::output_id(size_t idx) const
+{
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+    return _outputs[idx];
+}
+
+Tensor *INode::input(size_t idx) const
+{
+    ARM_COMPUTE_ERROR_ON(_graph == nullptr);
+    ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
+    Edge *e = _graph->edge(_input_edges[idx]);
+    return (e != nullptr) ? e->tensor() : nullptr;
+}
+
+Tensor *INode::output(size_t idx) const
+{
+    ARM_COMPUTE_ERROR_ON(_graph == nullptr);
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+    return _graph->tensor(_outputs[idx]);
+}
+
+EdgeID INode::input_edge_id(size_t idx) const
+{
+    ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
+    return _input_edges[idx];
+}
+
+Edge *INode::input_edge(size_t idx) const
+{
+    ARM_COMPUTE_ERROR_ON(_graph == nullptr);
+    ARM_COMPUTE_ERROR_ON(idx >= _input_edges.size());
+    return _graph->edge(_input_edges[idx]);
+}
+
+size_t INode::num_inputs() const
+{
+    return _input_edges.size();
+}
+
+size_t INode::num_outputs() const
+{
+    return _outputs.size();
+}
+
+Target INode::requested_target() const
+{
+    return _common_params.target;
+}
+
+Target INode::assigned_target() const
+{
+    return _assigned_target;
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/PassManager.cpp b/src/graph2/PassManager.cpp
new file mode 100644
index 0000000..2fa937b
--- /dev/null
+++ b/src/graph2/PassManager.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/PassManager.h"
+
+#include "arm_compute/graph2/Logger.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Constructs an empty pass manager with no registered mutation passes.
+PassManager::PassManager()
+    : _passes()
+{
+}
+
+// Read-only view of the registered passes, in execution order.
+const std::vector<std::unique_ptr<IGraphMutator>> &PassManager::passes() const
+{
+    return _passes;
+}
+
+// Returns the pass at @p index, or nullptr if the index is out of range.
+IGraphMutator *PassManager::pass(size_t index)
+{
+    return (index >= _passes.size()) ? nullptr : _passes.at(index).get();
+}
+
+// Appends a mutation pass to the end of the pipeline; null passes are
+// silently dropped so callers can append conditionally-created mutators.
+void PassManager::append(std::unique_ptr<IGraphMutator> pass)
+{
+    if(pass)
+    {
+        ARM_COMPUTE_LOG_GRAPH_VERBOSE("Appending mutating pass : " << pass->name() << std::endl);
+        _passes.push_back(std::move(pass));
+    }
+}
+
+// Removes (and destroys) all registered passes.
+void PassManager::clear()
+{
+    _passes.clear();
+}
+
+// Runs every registered pass over @p g in registration order.
+void PassManager::run_all(Graph &g)
+{
+    for(auto &pass : _passes)
+    {
+        if(pass)
+        {
+            ARM_COMPUTE_LOG_GRAPH_INFO("Running mutating pass : " << pass->name() << std::endl);
+            pass->mutate(g);
+        }
+    }
+}
+
+// Runs only the pass at @p index over @p g; a no-op for out-of-range indices.
+void PassManager::run(Graph &g, size_t index)
+{
+    if(index >= _passes.size())
+    {
+        return;
+    }
+
+    auto &pass = _passes.at(index);
+
+    if(pass != nullptr)
+    {
+        pass->mutate(g);
+    }
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/Tensor.cpp b/src/graph2/Tensor.cpp
new file mode 100644
index 0000000..c6054d7
--- /dev/null
+++ b/src/graph2/Tensor.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/Tensor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Constructs a graph tensor with a unique ID and shape/type descriptor.
+// The backend handle and accessor are attached later, during backend setup.
+Tensor::Tensor(TensorID id, TensorDescriptor desc)
+    : _id(id), _desc(desc), _handle(nullptr), _accessor(nullptr), _bound_edges()
+{
+}
+
+// Unique tensor ID within its graph.
+TensorID Tensor::id() const
+{
+    return _id;
+}
+
+// Mutable descriptor (shape, data type, target).
+TensorDescriptor &Tensor::desc()
+{
+    return _desc;
+}
+
+// Read-only descriptor.
+const TensorDescriptor &Tensor::desc() const
+{
+    return _desc;
+}
+
+// Takes ownership of the backend memory handle backing this tensor.
+void Tensor::set_handle(std::unique_ptr<ITensorHandle> backend_tensor)
+{
+    _handle = std::move(backend_tensor);
+}
+
+// Non-owning pointer to the backend handle; nullptr before backend setup.
+ITensorHandle *Tensor::handle()
+{
+    return _handle.get();
+}
+
+// Takes ownership of the accessor used to fill/read this tensor's data.
+void Tensor::set_accessor(std::unique_ptr<ITensorAccessor> accessor)
+{
+    _accessor = std::move(accessor);
+}
+
+// Non-owning pointer to the accessor; nullptr if none was set.
+ITensorAccessor *Tensor::accessor()
+{
+    return _accessor.get();
+}
+
+// Maps the backend tensor, invokes the accessor on it, then unmaps.
+// Returns false if no accessor/handle is set or the backend buffer is null.
+bool Tensor::call_accessor()
+{
+    // Early exit guard
+    if(!_accessor || !_handle)
+    {
+        return false;
+    }
+
+    // Map tensor
+    _handle->map(true);
+
+    // Return in case of null backend buffer
+    // NOTE(review): this early-out leaves the tensor mapped (no unmap on this
+    // path) — confirm whether backends tolerate that or an unmap is needed.
+    if(_handle->tensor().buffer() == nullptr)
+    {
+        return false;
+    }
+
+    // Call accessor
+    _accessor->access_tensor(_handle->tensor());
+
+    // Unmap tensor
+    _handle->unmap();
+
+    return true;
+}
+
+// Records that edge @p eid reads from or writes to this tensor.
+void Tensor::bind_edge(EdgeID eid)
+{
+    _bound_edges.insert(eid);
+}
+
+// Removes a previously recorded edge binding (no-op if absent).
+void Tensor::unbind_edge(EdgeID eid)
+{
+    _bound_edges.erase(eid);
+}
+
+// Set of all edges currently bound to this tensor (returned by value).
+const std::set<EdgeID> Tensor::bound_edges() const
+{
+    return _bound_edges;
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/Utils.cpp b/src/graph2/Utils.cpp
new file mode 100644
index 0000000..a518c80
--- /dev/null
+++ b/src/graph2/Utils.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/Utils.h"
+
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/graph2/backends/BackendRegistry.h"
+#include "arm_compute/graph2/mutators/GraphMutators.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// True if a device backend has registered itself for @p target.
+bool is_target_supported(Target target)
+{
+    return backends::BackendRegistry::get().contains(target);
+}
+
+// Picks the default execution target, preferring NEON over CL.
+// Aborts (via ARM_COMPUTE_ERROR) when no backend is registered at all,
+// which is why no return statement follows the error macro.
+Target get_default_target()
+{
+    if(is_target_supported(Target::NEON))
+    {
+        return Target::NEON;
+    }
+    if(is_target_supported(Target::CL))
+    {
+        return Target::CL;
+    }
+
+    ARM_COMPUTE_ERROR("No backend exists!");
+}
+
+// Overrides the assigned target of every node and tensor in @p g,
+// bypassing normal backend assignment.
+void force_target_to_graph(Graph &g, Target target)
+{
+    auto &nodes = g.nodes();
+    for(auto &node : nodes)
+    {
+        if(node)
+        {
+            node->set_assigned_target(target);
+        }
+    }
+
+    auto &tensors = g.tensors();
+    for(auto &tensor : tensors)
+    {
+        if(tensor)
+        {
+            tensor->desc().target = target;
+        }
+    }
+}
+
+// Builds the standard mutation pipeline: in-place collapsing, node fusion,
+// then depth-concat sub-tensor optimization. Order matters — fusion runs
+// before the sub-tensor pass so fused shapes are final.
+PassManager create_default_pass_manager()
+{
+    PassManager pm;
+
+    pm.append(support::cpp14::make_unique<InPlaceOperationMutator>());
+    pm.append(support::cpp14::make_unique<NodeFusionMutator>());
+    pm.append(support::cpp14::make_unique<DepthConcatSubTensorMutator>());
+
+    return pm;
+}
+
+/** Sets up a graph context with defaults from every registered backend
+ *
+ * @param[in,out] ctx Context to initialize
+ */
+void setup_default_graph_context(GraphContext &ctx)
+{
+    for(const auto &backend : backends::BackendRegistry::get().backends())
+    {
+        backend.second->setup_backend_context(ctx);
+    }
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/Workload.cpp b/src/graph2/Workload.cpp
new file mode 100644
index 0000000..3fd36fa
--- /dev/null
+++ b/src/graph2/Workload.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/Workload.h"
+
+#include "arm_compute/graph2/INode.h"
+#include "arm_compute/graph2/ITensorHandle.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Executes the wrapped backend function, if any; a task with a null
+// function (e.g. an optimized-away node) is a harmless no-op.
+void ExecutionTask::operator()()
+{
+    if(task)
+    {
+        task->run();
+    }
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/BackendRegistry.cpp b/src/graph2/backends/BackendRegistry.cpp
new file mode 100644
index 0000000..5f1218f
--- /dev/null
+++ b/src/graph2/backends/BackendRegistry.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/BackendRegistry.h"
+
+using namespace arm_compute::graph2::backends;
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+// Constructs an empty registry; backends self-register via BackendRegistrar
+// statics at program startup.
+BackendRegistry::BackendRegistry()
+    : _registered_backends()
+{
+}
+
+// Meyers-singleton accessor for the process-wide registry.
+BackendRegistry &BackendRegistry::get()
+{
+    static BackendRegistry instance;
+    return instance;
+}
+
+// Looks up the backend registered for @p target.
+// Precondition: the target is registered (asserted in debug builds).
+IDeviceBackend *BackendRegistry::find_backend(Target target)
+{
+    ARM_COMPUTE_ERROR_ON(!contains(target));
+    return _registered_backends[target].get();
+}
+
+// True if a backend is registered for @p target.
+bool BackendRegistry::contains(Target target) const
+{
+    auto it = _registered_backends.find(target);
+    return (it != _registered_backends.end());
+}
+
+// Read-only view of all registered backends, keyed by target.
+const std::map<Target, std::unique_ptr<IDeviceBackend>> &BackendRegistry::backends() const
+{
+    return _registered_backends;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
diff --git a/src/graph2/backends/CL/CLDeviceBackend.cpp b/src/graph2/backends/CL/CLDeviceBackend.cpp
new file mode 100644
index 0000000..e060331
--- /dev/null
+++ b/src/graph2/backends/CL/CLDeviceBackend.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/CL/CLDeviceBackend.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/graph2/INode.h"
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/Tensor.h"
+#include "arm_compute/graph2/backends/BackendRegistrar.h"
+#include "arm_compute/graph2/backends/CL/CLFunctionFactory.h"
+#include "arm_compute/graph2/backends/CL/CLSubTensorHandle.h"
+#include "arm_compute/graph2/backends/CL/CLTensorHandle.h"
+
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/runtime/BlobLifetimeManager.h"
+#include "arm_compute/runtime/CL/CLBufferAllocator.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+#include "arm_compute/runtime/PoolManager.h"
+
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+namespace
+{
+// True if @p filename exists and is readable (used to detect saved tuner data).
+bool file_exists(const std::string &filename)
+{
+    std::ifstream file(filename);
+    return file.good();
+}
+} // namespace
+
+/** Register CL backend */
+static detail::BackendRegistrar<CLDeviceBackend> CLDeviceBackend_registrar(Target::CL);
+
+/** Tuner export file */
+static const std::string tuner_data_filename = "acl_tuner.csv";
+
+// Constructs the backend with a tuner and a buffer allocator bound to the
+// default CL context (re-created later in initialize_backend()).
+CLDeviceBackend::CLDeviceBackend()
+    : _tuner(), _allocator(cl::Context::getDefault())
+{
+}
+
+// Persists freshly tuned kernel configurations on shutdown.
+CLDeviceBackend::~CLDeviceBackend()
+{
+    // TODO (geopin01) : Shouldn't call non exception safe stuff here
+    if(_tuner.tune_new_kernels() && !_tuner.lws_table().empty())
+    {
+        _tuner.save_to_file(tuner_data_filename);
+    }
+}
+
+// Enables/disables tuning of kernels not already in the LWS table.
+void CLDeviceBackend::set_kernel_tuning(bool enable_tuning)
+{
+    _tuner.set_tune_new_kernels(enable_tuning);
+}
+
+// One-time backend setup: restores tuner data from disk if present,
+// initializes the CL scheduler with the tuner, and resets the allocator.
+void CLDeviceBackend::initialize_backend()
+{
+    // Load tuner data if available
+    if(_tuner.lws_table().empty() && file_exists(tuner_data_filename))
+    {
+        _tuner.load_from_file(tuner_data_filename);
+    }
+
+    // Setup Scheduler
+    CLScheduler::get().default_init(&_tuner);
+
+    // Create allocator with new context
+    _allocator = CLBufferAllocator();
+}
+
+// Propagates context options (tuning flag) and registers a CL memory
+// management context if the graph context does not have one yet.
+void CLDeviceBackend::setup_backend_context(GraphContext &ctx)
+{
+    // Setup tuner
+    set_kernel_tuning(ctx.is_tuning_enabled());
+
+    // Setup a management backend
+    if(ctx.memory_management_ctx(Target::CL) == nullptr)
+    {
+        MemoryManagerContext mm_ctx;
+        mm_ctx.target = Target::CL;
+        mm_ctx.mm     = create_memory_manager(MemoryManagerAffinity::Buffer);
+
+        ctx.insert_memory_management_ctx(std::move(mm_ctx));
+    }
+}
+
+// Creates a CL tensor handle matching @p tensor's descriptor.
+// Precondition: the tensor is assigned to the CL target.
+std::unique_ptr<ITensorHandle> CLDeviceBackend::create_tensor(const Tensor &tensor)
+{
+    // Get tensor descriptor
+    const TensorDescriptor &tensor_desc = tensor.desc();
+    ARM_COMPUTE_ERROR_ON(tensor_desc.target != Target::CL);
+
+    // Create backend tensor handle
+    TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type);
+    auto       backend_tensor_handle = support::cpp14::make_unique<CLTensorHandle>(info);
+
+    return std::move(backend_tensor_handle);
+}
+
+// Creates a sub-tensor view over @p parent at @p coords with shape @p shape;
+// returns nullptr when there is no parent to view into.
+std::unique_ptr<ITensorHandle> CLDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords)
+{
+    if(parent == nullptr)
+    {
+        return nullptr;
+    }
+
+    return support::cpp14::make_unique<CLSubTensorHandle>(parent, shape, coords);
+}
+
+// Delegates to the CL function factory to build the runtime function for
+// @p node. Precondition: the node is assigned to the CL target.
+std::unique_ptr<arm_compute::IFunction> CLDeviceBackend::configure_node(INode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Configuring CL node with ID : " << node.id() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.assigned_target() != Target::CL);
+
+    // Configure node
+    return CLFunctionFactory::create(&node, ctx);
+}
+
+// Validation stub: currently accepts every node unconditionally.
+arm_compute::Status CLDeviceBackend::validate_node(const INode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating CL node with ID : " << node.id() << std::endl);
+
+    ARM_COMPUTE_UNUSED(node);
+
+    return Status{};
+}
+
+// Builds a blob-based on-demand memory manager wired to this backend's CL
+// allocator. Offset affinity is unsupported and yields nullptr (with warning).
+std::shared_ptr<arm_compute::IMemoryManager> CLDeviceBackend::create_memory_manager(MemoryManagerAffinity affinity)
+{
+    if(affinity == MemoryManagerAffinity::Offset)
+    {
+        ARM_COMPUTE_LOG_GRAPH_WARNING("CL Backend does not support offset affinity memory management!");
+        return nullptr;
+    }
+
+    auto lifetime_mgr = std::make_shared<BlobLifetimeManager>();
+    auto pool_mgr     = std::make_shared<PoolManager>();
+    auto mm           = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);
+
+    mm->set_allocator(&_allocator);
+
+    return mm;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/CL/CLFunctionsFactory.cpp b/src/graph2/backends/CL/CLFunctionsFactory.cpp
new file mode 100644
index 0000000..bba0cce
--- /dev/null
+++ b/src/graph2/backends/CL/CLFunctionsFactory.cpp
@@ -0,0 +1,584 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/CL/CLFunctionFactory.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/TypePrinter.h"
+#include "arm_compute/graph2/Types.h"
+#include "arm_compute/graph2/backends/Utils.h"
+#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/runtime/CL/CLFunctions.h"
+
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute::utils::cast;
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+namespace
+{
+/** Returns backing tensor of a given tensor
+ *
+ * @param[in] tensor Tensor to extract the backing tensor from (may be nullptr)
+ *
+ * @return Backing tensor if present else nullptr
+ */
+arm_compute::ICLTensor *get_backing_tensor(arm_compute::graph2::Tensor *tensor)
+{
+    arm_compute::ICLTensor *backing_tensor = nullptr;
+    if(tensor != nullptr)
+    {
+        ARM_COMPUTE_ERROR_ON(tensor->desc().target != arm_compute::graph2::Target::CL);
+        // Get backing tensor handle
+        ITensorHandle *tensor_handle = tensor->handle();
+        // Get backing tensor (cast is safe: CL-target tensors carry ICLTensor handles)
+        backing_tensor = (tensor_handle != nullptr) ? polymorphic_cast<ICLTensor *>(&tensor_handle->tensor()) : nullptr;
+    }
+
+    return backing_tensor;
+}
+
+/** Create a backend activation layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend activation layer function
+ */
+std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL ActivationLayerNode node with ID : " << node.id() << " and Name: " << node.name()
+        << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor                *input    = get_backing_tensor(node.input(0));
+    ICLTensor                *output   = get_backing_tensor(node.output(0));
+    const ActivationLayerInfo act_info = node.activation_info();
+
+    // Create function
+    auto func = support::cpp14::make_unique<CLActivationLayer>();
+    func->configure(input, output, act_info);
+
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLActivationLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Shape: " << input->info()->tensor_shape()
+                               << " Activation function: " << act_info.activation()
+                               << " a: " << act_info.a()
+                               << " b: " << act_info.b()
+                               << " InPlace : " << is_in_place_operation(input, output)
+                               << std::endl);
+
+    // std::move needed to convert unique_ptr<CLActivationLayer> to unique_ptr<IFunction>
+    return std::move(func);
+}
+
+/** Create a backend batch normalization layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend batch normalization layer function
+ */
+std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating CL BatchNormalization node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+
+    // TODO (geopin01) : Var and mean are compulsory, switch function to accept nullptr as beta and/or gamma
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 5);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info (input ports: 0=data, 1=mean, 2=var, 3=beta, 4=gamma)
+    ICLTensor                *input     = get_backing_tensor(node.input(0));
+    ICLTensor                *mean      = get_backing_tensor(node.input(1));
+    ICLTensor                *var       = get_backing_tensor(node.input(2));
+    ICLTensor                *beta      = get_backing_tensor(node.input(3));
+    ICLTensor                *gamma     = get_backing_tensor(node.input(4));
+    ICLTensor                *output    = get_backing_tensor(node.output(0));
+    const float               epsilon   = node.epsilon();
+    const ActivationLayerInfo fused_act = node.fused_activation();
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<CLBatchNormalizationLayer>();
+    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLBatchNormalizationLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Shape: " << input->info()->tensor_shape()
+                               << " Epsilon: " << epsilon << " "
+                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
+                               << " InPlace : " << is_in_place_operation(input, output)
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend convolution layer function
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context (supplies the CL memory manager for GEMM-based paths)
+ *
+ * @return Backend convolution layer function
+ */
+std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating CL ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info (input ports: 0=data, 1=weights, 2=biases)
+    ICLTensor              *input          = get_backing_tensor(node.input(0));
+    ICLTensor              *weights        = get_backing_tensor(node.input(1));
+    ICLTensor              *biases         = get_backing_tensor(node.input(2));
+    ICLTensor              *output         = get_backing_tensor(node.output(0));
+    const PadStrideInfo     conv_info      = node.convolution_info();
+    const ConvolutionMethod conv_algorithm = node.convolution_method();
+
+    // Create and configure function (we assume that functions have been validated before creation)
+    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, Target::CL);
+    std::unique_ptr<IFunction>      func;
+    std::string                     func_name;
+    if(conv_algorithm == ConvolutionMethod::DIRECT)
+    {
+        std::tie(func, func_name) = create_named_function<CLDirectConvolutionLayer>(
+                                        std::string("CLDirectConvolutionLayer"), input, weights, biases, output, conv_info);
+    }
+    else if(conv_algorithm == ConvolutionMethod::GEMM)
+    {
+        std::tie(func, func_name) = create_named_memory_managed_function<CLGEMMConvolutionLayer>(std::string("CLGEMMConvolutionLayer"), mm,
+                                                                                                 input, weights, biases, output, conv_info);
+    }
+    else
+    {
+        // Fallback: let CLConvolutionLayer pick the method heuristically
+        std::tie(func, func_name) = create_named_memory_managed_function<CLConvolutionLayer>(std::string("CLConvolutionLayer"), mm,
+                                                                                             input, weights, biases, output, conv_info);
+    }
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+    return func;
+}
+
+/** Create a backend layer depth concatenate function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend depth concatenate layer function, or nullptr when the node
+ *         was disabled (e.g. replaced by sub-tensor optimization)
+ */
+std::unique_ptr<arm_compute::IFunction> create_depth_concatenate_layer(DepthConcatenateLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating CL DepthConcatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Return nullptr if depth concatenate is switched off
+    if(!node.is_enabled())
+    {
+        return nullptr;
+    }
+
+    // Extract IO and info: variadic inputs are gathered into a vector
+    std::vector<arm_compute::ICLTensor *> inputs;
+    for(unsigned int i = 0; i < node.num_inputs(); ++i)
+    {
+        inputs.push_back(get_backing_tensor(node.input(i)));
+    }
+    ICLTensor *output = get_backing_tensor(node.output(0));
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<CLDepthConcatenateLayer>();
+    func->configure(inputs, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLDepthConcatenateLayer"
+                               << " Data Type: " << output->info()->data_type()
+                               << " Shape: " << output->info()->tensor_shape()
+                               << " Num Inputs: " << inputs.size()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend layer depth-wise convolution function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend depth-wise convolution layer function
+ */
+std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name()
+        << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info (input ports: 0=data, 1=weights, 2=biases)
+    ICLTensor                       *input         = get_backing_tensor(node.input(0));
+    ICLTensor                       *weights       = get_backing_tensor(node.input(1));
+    ICLTensor                       *biases        = get_backing_tensor(node.input(2));
+    ICLTensor                       *output        = get_backing_tensor(node.output(0));
+    const PadStrideInfo              conv_info     = node.convolution_info();
+    const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
+
+    // Create and configure function (we assume that functions have been validated before creation)
+    std::unique_ptr<IFunction> func;
+    std::string                func_name;
+    if(dwc_algorithm == DepthwiseConvolutionMethod::OPTIMIZED_3x3)
+    {
+        std::tie(func, func_name) = create_named_function<CLDepthwiseConvolutionLayer3x3>(
+                                        std::string("CLDepthwiseConvolutionLayer3x3"), input, weights, biases, output, conv_info);
+    }
+    else
+    {
+        std::tie(func, func_name) = create_named_function<CLDepthwiseConvolutionLayer>(
+                                        std::string("CLDepthwiseConvolutionLayer"), input, weights, biases, output, conv_info);
+    }
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+    return func;
+}
+
+/** Create a backend element-wise operation layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend element-wise operation layer function (ADD/SUB/MUL);
+ *         aborts on any other operation
+ */
+std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 2);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor             *input1     = get_backing_tensor(node.input(0));
+    ICLTensor             *input2     = get_backing_tensor(node.input(1));
+    ICLTensor             *output     = get_backing_tensor(node.output(0));
+    const EltwiseOperation eltwise_op = node.eltwise_operation();
+    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
+    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    std::unique_ptr<IFunction> func = nullptr;
+    std::string                func_name;
+    if(eltwise_op == EltwiseOperation::ADD)
+    {
+        std::tie(func, func_name) = create_named_function<CLArithmeticAddition>(std::string("CLArithmeticAddition"),
+                                                                                input1, input2, output,
+                                                                                ConvertPolicy::SATURATE);
+    }
+    else if(eltwise_op == EltwiseOperation::SUB)
+    {
+        std::tie(func, func_name) = create_named_function<CLArithmeticSubtraction>(
+                                        std::string("CLArithmeticSubtraction"), input1, input2, output, ConvertPolicy::SATURATE);
+    }
+    else if(eltwise_op == EltwiseOperation::MUL)
+    {
+        std::tie(func, func_name) = create_named_function<CLPixelWiseMultiplication>(
+                                        std::string("CLPixelWiseMultiplication"), input1, input2, output, 1.f, ConvertPolicy::SATURATE,
+                                        RoundingPolicy::TO_NEAREST_EVEN);
+    }
+    else
+    {
+        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
+    }
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+                               << " Data Type: " << input1->info()->data_type()
+                               << " Shape : " << input1->info()->tensor_shape()
+                               << std::endl);
+
+    return func;
+}
+
+/** Create a backend flatten layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend flatten layer function
+ */
+std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL FlattenLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor *input  = get_backing_tensor(node.input(0));
+    ICLTensor *output = get_backing_tensor(node.output(0));
+    // Validate tensors *before* configure() dereferences them (previously these
+    // checks ran after configuration, defeating their purpose)
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<CLFlattenLayer>();
+    func->configure(input, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLFlattenLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend fully connected layer function
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context the memory manager is retrieved from
+ *
+ * @return Backend fully connected layer function
+ */
+std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL FullyConnectedLayer node with ID : " << node.id() << " and Name: " << node.name()
+        << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor *input   = get_backing_tensor(node.input(0));
+    ICLTensor *weights = get_backing_tensor(node.input(1));
+    ICLTensor *biases  = get_backing_tensor(node.input(2));
+    ICLTensor *output  = get_backing_tensor(node.output(0));
+    // Validate tensors *before* configure() dereferences them (previously these
+    // checks ran after configuration, defeating their purpose)
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(weights == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+    // NOTE(review): biases is dereferenced in the log below but never null-checked — confirm it cannot be nullptr here
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<CLFullyConnectedLayer>(get_memory_manager(ctx, Target::CL));
+    func->configure(input, weights, biases, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLFullyConnectedLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Biases Shape: " << biases->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend normalization layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend normalization layer function
+ */
+std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL NormalizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor                   *src       = get_backing_tensor(node.input(0));
+    ICLTensor                   *dst       = get_backing_tensor(node.output(0));
+    const NormalizationLayerInfo norm_info = node.normalization_info();
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+    ARM_COMPUTE_ERROR_ON(dst == nullptr);
+
+    // Instantiate the backend function and wire up its IO
+    auto norm_func = support::cpp14::make_unique<CLNormalizationLayer>();
+    norm_func->configure(src, dst, norm_info);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLNormalizationLayer"
+                               << " Data Type: " << src->info()->data_type()
+                               << " Input shape: " << src->info()->tensor_shape()
+                               << " Output shape: " << dst->info()->tensor_shape()
+                               << " Normalization info: " << norm_info.type()
+                               << std::endl);
+
+    return std::move(norm_func);
+}
+
+/** Create a backend pooling layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend pooling layer function
+ */
+std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL PoolingLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor             *src       = get_backing_tensor(node.input(0));
+    ICLTensor             *dst       = get_backing_tensor(node.output(0));
+    const PoolingLayerInfo pool_info = node.pooling_info();
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+    ARM_COMPUTE_ERROR_ON(dst == nullptr);
+
+    // Instantiate the backend function and wire up its IO
+    auto pool_func = support::cpp14::make_unique<CLPoolingLayer>();
+    pool_func->configure(src, dst, pool_info);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLPoolingLayer"
+                               << " Data Type: " << src->info()->data_type()
+                               << " Input shape: " << src->info()->tensor_shape()
+                               << " Output shape: " << dst->info()->tensor_shape()
+                               << " Pooling info: " << pool_info.pool_type()
+                               << std::endl);
+
+    return std::move(pool_func);
+}
+
+/** Create a backend reshape layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend reshape layer function
+ */
+std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor *src = get_backing_tensor(node.input(0));
+    ICLTensor *dst = get_backing_tensor(node.output(0));
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+    ARM_COMPUTE_ERROR_ON(dst == nullptr);
+
+    // Instantiate the backend function and wire up its IO
+    auto reshape_func = support::cpp14::make_unique<CLReshapeLayer>();
+    reshape_func->configure(src, dst);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLReshapeLayer"
+                               << " Data Type: " << src->info()->data_type()
+                               << " Input shape: " << src->info()->tensor_shape()
+                               << " Output shape: " << dst->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(reshape_func);
+}
+
+/** Create a backend softmax layer function
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context the memory manager is retrieved from
+ *
+ * @return Backend softmax layer function
+ */
+std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL SoftmaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor  *src  = get_backing_tensor(node.input(0));
+    ICLTensor  *dst  = get_backing_tensor(node.output(0));
+    const float beta = node.beta();
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+    ARM_COMPUTE_ERROR_ON(dst == nullptr);
+
+    // Instantiate the backend function with the context's CL memory manager
+    auto softmax_func = support::cpp14::make_unique<CLSoftmaxLayer>(get_memory_manager(ctx, Target::CL));
+    softmax_func->configure(src, dst, beta);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLSoftmaxLayer"
+                               << " Data Type: " << src->info()->data_type()
+                               << " Input shape: " << src->info()->tensor_shape()
+                               << " Output shape: " << dst->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(softmax_func);
+}
+} // namespace
+
+std::unique_ptr<IFunction> CLFunctionFactory::create(INode *node, GraphContext &ctx)
+{
+    // Gracefully handle null nodes
+    if(node == nullptr)
+    {
+        return nullptr;
+    }
+
+    // Dispatch on the node type to the matching CL function creator;
+    // node types without a CL implementation fall through to nullptr.
+    NodeType type = node->type();
+    switch(type)
+    {
+        case NodeType::ActivationLayer:
+            return create_activation_layer(*polymorphic_downcast<ActivationLayerNode *>(node));
+        case NodeType::BatchNormalizationLayer:
+            return create_batch_normalization_layer(*polymorphic_downcast<BatchNormalizationLayerNode *>(node));
+        case NodeType::ConvolutionLayer:
+            return create_convolution_layer(*polymorphic_downcast<ConvolutionLayerNode *>(node), ctx);
+        case NodeType::DepthConcatenateLayer:
+            return create_depth_concatenate_layer(*polymorphic_downcast<DepthConcatenateLayerNode *>(node));
+        case NodeType::DepthwiseConvolutionLayer:
+            return create_depthwise_convolution_layer(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+        case NodeType::EltwiseLayer:
+            return create_eltwise_layer(*polymorphic_downcast<EltwiseLayerNode *>(node));
+        case NodeType::FlattenLayer:
+            return create_flatten_layer(*polymorphic_downcast<FlattenLayerNode *>(node));
+        case NodeType::FullyConnectedLayer:
+            return create_fully_connected_layer(*polymorphic_downcast<FullyConnectedLayerNode *>(node), ctx);
+        case NodeType::NormalizationLayer:
+            return create_normalization_layer(*polymorphic_downcast<NormalizationLayerNode *>(node));
+        case NodeType::PoolingLayer:
+            return create_pooling_layer(*polymorphic_downcast<PoolingLayerNode *>(node));
+        case NodeType::ReshapeLayer:
+            return create_reshape_layer(*polymorphic_downcast<ReshapeLayerNode *>(node));
+        case NodeType::SoftmaxLayer:
+            return create_softmax_layer(*polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);
+        default:
+            return nullptr;
+    }
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/CL/CLSubTensorHandle.cpp b/src/graph2/backends/CL/CLSubTensorHandle.cpp
new file mode 100644
index 0000000..2954652
--- /dev/null
+++ b/src/graph2/backends/CL/CLSubTensorHandle.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/CL/CLSubTensorHandle.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+/** Construct a CL sub-tensor view into the given parent handle's tensor */
+CLSubTensorHandle::CLSubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords)
+    : _sub_tensor()
+{
+    ARM_COMPUTE_ERROR_ON(!parent_handle);
+    // The parent handle is expected to wrap a CL tensor; downcast to access it
+    auto parent_tensor = arm_compute::utils::cast::polymorphic_downcast<ICLTensor *>(&parent_handle->tensor());
+    _sub_tensor        = arm_compute::CLSubTensor(parent_tensor, shape, coords);
+}
+
+void CLSubTensorHandle::allocate()
+{
+    // noop: a sub-tensor is a view, so its memory belongs to the parent handle
+}
+
+const arm_compute::ITensor &CLSubTensorHandle::tensor() const
+{
+    // Expose the backing CL sub-tensor as a read-only ITensor
+    return _sub_tensor;
+}
+
+arm_compute::ITensor &CLSubTensorHandle::tensor()
+{
+    // Expose the backing CL sub-tensor as a mutable ITensor
+    return _sub_tensor;
+}
+
+void CLSubTensorHandle::map(bool blocking)
+{
+    // Forward the (optionally blocking) map request to the underlying CL sub-tensor
+    _sub_tensor.map(blocking);
+}
+
+void CLSubTensorHandle::unmap()
+{
+    // Release the host-side mapping of the underlying CL sub-tensor
+    _sub_tensor.unmap();
+}
+
+bool CLSubTensorHandle::is_subtensor() const
+{
+    // Identifies this handle as a sub-tensor view
+    return true;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/CL/CLTensorHandle.cpp b/src/graph2/backends/CL/CLTensorHandle.cpp
new file mode 100644
index 0000000..f515e0b
--- /dev/null
+++ b/src/graph2/backends/CL/CLTensorHandle.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/CL/CLTensorHandle.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+/** Construct a CL tensor handle described by the given tensor info */
+CLTensorHandle::CLTensorHandle(const ITensorInfo &info)
+    : _tensor()
+{
+    // Only metadata is initialized here; memory is acquired later in allocate()
+    _tensor.allocator()->init(info);
+}
+
+void CLTensorHandle::allocate()
+{
+    // Allocate the backing CL memory as described by the initialized tensor info
+    _tensor.allocator()->allocate();
+}
+
+const arm_compute::ITensor &CLTensorHandle::tensor() const
+{
+    // Expose the backing CL tensor as a read-only ITensor
+    return _tensor;
+}
+
+arm_compute::ITensor &CLTensorHandle::tensor()
+{
+    // Expose the backing CL tensor as a mutable ITensor
+    return _tensor;
+}
+
+void CLTensorHandle::map(bool blocking)
+{
+    // Forward the (optionally blocking) map request to the underlying CL tensor
+    _tensor.map(blocking);
+}
+
+void CLTensorHandle::unmap()
+{
+    // Release the host-side mapping of the underlying CL tensor
+    _tensor.unmap();
+}
+
+bool CLTensorHandle::is_subtensor() const
+{
+    // A full tensor handle, not a view into another tensor
+    return false;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/NEON/NEDeviceBackend.cpp b/src/graph2/backends/NEON/NEDeviceBackend.cpp
new file mode 100644
index 0000000..9f24498
--- /dev/null
+++ b/src/graph2/backends/NEON/NEDeviceBackend.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/NEON/NEDeviceBackend.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/graph2/INode.h"
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/Tensor.h"
+#include "arm_compute/graph2/backends/BackendRegistrar.h"
+#include "arm_compute/graph2/backends/NEON/NEFunctionFactory.h"
+#include "arm_compute/graph2/backends/NEON/NESubTensorHandle.h"
+#include "arm_compute/graph2/backends/NEON/NETensorHandle.h"
+
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/runtime/Allocator.h"
+#include "arm_compute/runtime/BlobLifetimeManager.h"
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+#include "arm_compute/runtime/OffsetLifetimeManager.h"
+#include "arm_compute/runtime/PoolManager.h"
+
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+/** Register NEON backend */
+static detail::BackendRegistrar<NEDeviceBackend> NEDeviceBackend_registrar(Target::NEON);
+
+/** Default constructor: creates the backend with its own allocator instance */
+NEDeviceBackend::NEDeviceBackend()
+    : _allocator()
+{
+}
+
+void NEDeviceBackend::initialize_backend()
+{
+    // No global initialization is required for the NEON backend
+}
+
+/** Ensure the graph context owns a NEON memory-management context */
+void NEDeviceBackend::setup_backend_context(GraphContext &ctx)
+{
+    // Only one memory management context is registered per target
+    if(ctx.memory_management_ctx(Target::NEON) != nullptr)
+    {
+        return;
+    }
+
+    MemoryManagerContext mm_ctx;
+    mm_ctx.target = Target::NEON;
+    mm_ctx.mm     = create_memory_manager(MemoryManagerAffinity::Buffer);
+
+    ctx.insert_memory_management_ctx(std::move(mm_ctx));
+}
+
+/** Create a NEON tensor handle matching the given graph tensor's descriptor */
+std::unique_ptr<ITensorHandle> NEDeviceBackend::create_tensor(const Tensor &tensor)
+{
+    // Get tensor descriptor
+    const TensorDescriptor &desc = tensor.desc();
+    ARM_COMPUTE_ERROR_ON(desc.target != Target::NEON);
+
+    // Build the backend tensor handle from the descriptor information
+    TensorInfo info(desc.shape, 1, desc.data_type);
+    return support::cpp14::make_unique<NETensorHandle>(info);
+}
+
+/** Create a NEON sub-tensor handle; returns nullptr when no parent is given */
+std::unique_ptr<ITensorHandle> NEDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords)
+{
+    // A sub-tensor cannot exist without a parent tensor
+    if(parent != nullptr)
+    {
+        return support::cpp14::make_unique<NESubTensorHandle>(parent, shape, coords);
+    }
+    return nullptr;
+}
+
+std::unique_ptr<arm_compute::IFunction> NEDeviceBackend::configure_node(INode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Configuring NEON node with ID : " << node.id() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.assigned_target() != Target::NEON);
+
+    // Configure node
+    // Delegate function creation and configuration to the NEON function factory
+    return NEFunctionFactory::create(&node, ctx);
+}
+
+arm_compute::Status NEDeviceBackend::validate_node(const INode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating NEON node with ID : " << node.id() << std::endl);
+    ARM_COMPUTE_UNUSED(node);
+
+    // No backend-specific validation is implemented yet; every node is accepted
+    return Status{};
+}
+
+/** Build an on-demand memory manager whose lifetime strategy matches the requested affinity */
+std::shared_ptr<arm_compute::IMemoryManager> NEDeviceBackend::create_memory_manager(MemoryManagerAffinity affinity)
+{
+    // Pick the lifetime manager flavour matching the requested affinity
+    std::shared_ptr<ILifetimeManager> lifetime_mgr;
+    if(affinity == MemoryManagerAffinity::Buffer)
+    {
+        lifetime_mgr = std::make_shared<BlobLifetimeManager>();
+    }
+    else
+    {
+        lifetime_mgr = std::make_shared<OffsetLifetimeManager>();
+    }
+
+    auto pool_mgr   = std::make_shared<PoolManager>();
+    auto memory_mgr = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);
+    memory_mgr->set_allocator(&_allocator);
+
+    return memory_mgr;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/NEON/NEFunctionFactory.cpp b/src/graph2/backends/NEON/NEFunctionFactory.cpp
new file mode 100644
index 0000000..9332103
--- /dev/null
+++ b/src/graph2/backends/NEON/NEFunctionFactory.cpp
@@ -0,0 +1,563 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/NEON/NEFunctionFactory.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/TypePrinter.h"
+#include "arm_compute/graph2/backends/Utils.h"
+#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/runtime/NEON/NEFunctions.h"
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute::utils::cast;
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+namespace
+{
+/** Returns backing tensor of a given tensor
+ *
+ * @param[in] tensor Tensor to extract the backing tensor from
+ *
+ * @return Backing tensor if present else nullptr
+ */
+arm_compute::ITensor *get_backing_tensor(arm_compute::graph2::Tensor *tensor)
+{
+    // Either the graph tensor or its handle may be absent
+    if(tensor == nullptr || tensor->handle() == nullptr)
+    {
+        return nullptr;
+    }
+    return &tensor->handle()->tensor();
+}
+
+/** Create a backend activation layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend activation layer function
+ */
+std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON ActivationLayerNode node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor                  *src      = get_backing_tensor(node.input(0));
+    ITensor                  *dst      = get_backing_tensor(node.output(0));
+    const ActivationLayerInfo act_info = node.activation_info();
+
+    // Instantiate the backend function and wire up its IO
+    auto act_func = support::cpp14::make_unique<NEActivationLayer>();
+    act_func->configure(src, dst, act_info);
+
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEActivationLayer"
+                               << " Data Type: " << src->info()->data_type()
+                               << " Shape: " << src->info()->tensor_shape()
+                               << " Activation function: " << act_info.activation()
+                               << " a: " << act_info.a()
+                               << " b: " << act_info.b()
+                               << " InPlace : " << is_in_place_operation(src, dst)
+                               << std::endl);
+
+    return std::move(act_func);
+}
+
+/** Create a backend batch normalization layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend batch normalization layer function
+ */
+std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON BatchNormalization node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+
+    // TODO (geopin01) : Var and mean are compulsory, switch function to accept nullptr as beta and/or gamma
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 5);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor                  *src       = get_backing_tensor(node.input(0));
+    ITensor                  *mean      = get_backing_tensor(node.input(1));
+    ITensor                  *var       = get_backing_tensor(node.input(2));
+    ITensor                  *beta      = get_backing_tensor(node.input(3));
+    ITensor                  *gamma     = get_backing_tensor(node.input(4));
+    ITensor                  *dst       = get_backing_tensor(node.output(0));
+    const float               epsilon   = node.epsilon();
+    const ActivationLayerInfo fused_act = node.fused_activation();
+
+    // Instantiate the backend function and wire up its IO
+    auto bn_func = support::cpp14::make_unique<NEBatchNormalizationLayer>();
+    bn_func->configure(src, dst, mean, var, beta, gamma, epsilon, fused_act);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEBatchNormalizationLayer"
+                               << " Data Type: " << src->info()->data_type()
+                               << " Shape: " << src->info()->tensor_shape()
+                               << " Epsilon: " << epsilon << " "
+                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
+                               << " InPlace : " << is_in_place_operation(src, dst)
+                               << std::endl);
+
+    return std::move(bn_func);
+}
+
+/** Create a backend convolution layer function
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context the memory manager is retrieved from
+ *
+ * @return Backend convolution layer function
+ */
+std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor                *src            = get_backing_tensor(node.input(0));
+    ITensor                *weights        = get_backing_tensor(node.input(1));
+    ITensor                *biases         = get_backing_tensor(node.input(2));
+    ITensor                *dst            = get_backing_tensor(node.output(0));
+    const PadStrideInfo     conv_info      = node.convolution_info();
+    const ConvolutionMethod conv_algorithm = node.convolution_method();
+
+    // Create and configure function (we assume that functions have been validated before creation)
+    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, Target::NEON);
+    std::unique_ptr<IFunction>      func;
+    std::string                     func_name;
+    switch(conv_algorithm)
+    {
+        case ConvolutionMethod::DIRECT:
+            std::tie(func, func_name) = create_named_memory_managed_function<NEDirectConvolutionLayer>(
+                                            std::string("NEDirectConvolutionLayer"), mm, src, weights, biases, dst, conv_info);
+            break;
+        case ConvolutionMethod::GEMM:
+            std::tie(func, func_name) = create_named_memory_managed_function<NEGEMMConvolutionLayer>(
+                                            std::string("NEGEMMConvolutionLayer"), mm, src, weights, biases, dst, conv_info);
+            break;
+        case ConvolutionMethod::WINOGRAD:
+            std::tie(func, func_name) = create_named_memory_managed_function<NEWinogradLayer>(
+                                            std::string("NEWinogradLayer"), mm, src, weights, biases, dst, conv_info);
+            break;
+        default:
+            // Fallback: generic convolution function
+            std::tie(func, func_name) = create_named_memory_managed_function<NEConvolutionLayer>(
+                                            std::string("NEConvolutionLayer"), mm, src, weights, biases, dst, conv_info);
+            break;
+    }
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+                               << " Data Type: " << src->info()->data_type()
+                               << " Input shape: " << src->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Output shape: " << dst->info()->tensor_shape()
+                               << std::endl);
+    return func;
+}
+
+/** Create a backend layer depth concatenate function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend depth concatenate layer function
+ */
+std::unique_ptr<arm_compute::IFunction> create_depth_concatenate_layer(DepthConcatenateLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON DepthConcatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Return nullptr if depth concatenate is switched off
+    if(!node.is_enabled())
+    {
+        return nullptr;
+    }
+
+    // Collect the backing tensor of every input edge
+    const unsigned int                  num_inputs = node.num_inputs();
+    std::vector<arm_compute::ITensor *> src_tensors;
+    src_tensors.reserve(num_inputs);
+    for(unsigned int idx = 0; idx < num_inputs; ++idx)
+    {
+        src_tensors.push_back(get_backing_tensor(node.input(idx)));
+    }
+    ITensor *dst = get_backing_tensor(node.output(0));
+
+    // Instantiate the backend function and wire up its IO
+    auto concat_func = support::cpp14::make_unique<NEDepthConcatenateLayer>();
+    concat_func->configure(src_tensors, dst);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEDepthConcatenateLayer"
+                               << " Data Type: " << dst->info()->data_type()
+                               << " Shape: " << dst->info()->tensor_shape()
+                               << " Num Inputs: " << src_tensors.size()
+                               << std::endl);
+
+    return std::move(concat_func);
+}
+
+/** Create a backend layer depth-wise convolution function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend depth-wise convolution layer function
+ */
+std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor                         *input         = get_backing_tensor(node.input(0));
+    ITensor                         *weights       = get_backing_tensor(node.input(1));
+    ITensor                         *biases        = get_backing_tensor(node.input(2));
+    ITensor                         *output        = get_backing_tensor(node.output(0));
+    const PadStrideInfo              conv_info     = node.convolution_info();
+    const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
+
+    // Create and configure function (we assume that functions have been validated before creation)
+    std::unique_ptr<IFunction> func;
+    std::string                func_name;
+    if(dwc_algorithm == DepthwiseConvolutionMethod::OPTIMIZED_3x3)
+    {
+        std::tie(func, func_name) = create_named_function<NEDepthwiseConvolutionLayer3x3>(std::string("NEDepthwiseConvolutionLayer3x3"),
+                                                                                          input, weights, biases, output, conv_info);
+    }
+    else
+    {
+        std::tie(func, func_name) = create_named_function<NEDepthwiseConvolutionLayer>(std::string("NEDepthwiseConvolutionLayer"),
+                                                                                       input, weights, biases, output, conv_info);
+    }
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+    return func;
+}
+
+/** Create a backend element-wise operation layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend element-wise operation layer function
+ */
+std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 2);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor               *input1     = get_backing_tensor(node.input(0));
+    ITensor               *input2     = get_backing_tensor(node.input(1));
+    ITensor               *output     = get_backing_tensor(node.output(0));
+    const EltwiseOperation eltwise_op = node.eltwise_operation();
+    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
+    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    std::unique_ptr<IFunction> func = nullptr;
+    std::string                func_name;
+    if(eltwise_op == EltwiseOperation::ADD)
+    {
+        std::tie(func, func_name) = create_named_function<NEArithmeticAddition>(std::string("NEArithmeticAddition"),
+                                                                                input1, input2, output, ConvertPolicy::SATURATE);
+    }
+    else if(eltwise_op == EltwiseOperation::SUB)
+    {
+        std::tie(func, func_name) = create_named_function<NEArithmeticSubtraction>(std::string("NEArithmeticSubtraction"),
+                                                                                   input1, input2, output, ConvertPolicy::SATURATE);
+    }
+    else if(eltwise_op == EltwiseOperation::MUL)
+    {
+        std::tie(func, func_name) = create_named_function<NEPixelWiseMultiplication>(std::string("NEPixelWiseMultiplication"),
+                                                                                     input1, input2, output, 1.f,
+                                                                                     ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
+    }
+    else
+    {
+        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
+    }
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+                               << " Data Type: " << input1->info()->data_type()
+                               << " Shape : " << input1->info()->tensor_shape()
+                               << std::endl);
+
+    return func;
+}
+
+/** Create a backend flatten layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend flatten layer function
+ */
+std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON FlattenLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor *input  = get_backing_tensor(node.input(0));
+    ITensor *output = get_backing_tensor(node.output(0));
+
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NEFlattenLayer>();
+    func->configure(input, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEFlattenLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend fully connected layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend fully connected layer function
+ */
+std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON FullyConnectedLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor *input   = get_backing_tensor(node.input(0));
+    ITensor *weights = get_backing_tensor(node.input(1));
+    ITensor *biases  = get_backing_tensor(node.input(2));
+    ITensor *output  = get_backing_tensor(node.output(0));
+
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(weights == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NEFullyConnectedLayer>(get_memory_manager(ctx, Target::NEON));
+    func->configure(input, weights, biases, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEFullyConnectedLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend normalization layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend normalization layer function
+ */
+std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON NormalizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor                     *input     = get_backing_tensor(node.input(0));
+    ITensor                     *output    = get_backing_tensor(node.output(0));
+    const NormalizationLayerInfo norm_info = node.normalization_info();
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NENormalizationLayer>(get_memory_manager(ctx, Target::NEON));
+    func->configure(input, output, norm_info);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NENormalizationLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << " Normalization info: " << norm_info.type()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend pooling layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend pooling layer function
+ */
+std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON PoolingLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor               *input     = get_backing_tensor(node.input(0));
+    ITensor               *output    = get_backing_tensor(node.output(0));
+    const PoolingLayerInfo pool_info = node.pooling_info();
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NEPoolingLayer>();
+    func->configure(input, output, pool_info);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEPoolingLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << " Pooling info: " << pool_info.pool_type()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend reshape layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend reshape layer function
+ */
+std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor *input  = get_backing_tensor(node.input(0));
+    ITensor *output = get_backing_tensor(node.output(0));
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NEReshapeLayer>();
+    func->configure(input, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEReshapeLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend softmax layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend softmax layer function
+ */
+std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON SoftmaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor    *input  = get_backing_tensor(node.input(0));
+    ITensor    *output = get_backing_tensor(node.output(0));
+    const float beta   = node.beta();
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NESoftmaxLayer>(get_memory_manager(ctx, Target::NEON));
+    func->configure(input, output, beta);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NESoftmaxLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
+} // namespace
+
+std::unique_ptr<IFunction> NEFunctionFactory::create(INode *node, GraphContext &ctx)
+{
+    if(node == nullptr)
+    {
+        return nullptr;
+    }
+
+    NodeType type = node->type();
+    switch(type)
+    {
+        case NodeType::ActivationLayer:
+            return create_activation_layer(*polymorphic_downcast<ActivationLayerNode *>(node));
+        case NodeType::BatchNormalizationLayer:
+            return create_batch_normalization_layer(*polymorphic_downcast<BatchNormalizationLayerNode *>(node));
+        case NodeType::ConvolutionLayer:
+            return create_convolution_layer(*polymorphic_downcast<ConvolutionLayerNode *>(node), ctx);
+        case NodeType::DepthConcatenateLayer:
+            return create_depth_concatenate_layer(*polymorphic_downcast<DepthConcatenateLayerNode *>(node));
+        case NodeType::DepthwiseConvolutionLayer:
+            return create_depthwise_convolution_layer(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+        case NodeType::EltwiseLayer:
+            return create_eltwise_layer(*polymorphic_downcast<EltwiseLayerNode *>(node));
+        case NodeType::FlattenLayer:
+            return create_flatten_layer(*polymorphic_downcast<FlattenLayerNode *>(node));
+        case NodeType::FullyConnectedLayer:
+            return create_fully_connected_layer(*polymorphic_downcast<FullyConnectedLayerNode *>(node), ctx);
+        case NodeType::NormalizationLayer:
+            return create_normalization_layer(*polymorphic_downcast<NormalizationLayerNode *>(node), ctx);
+        case NodeType::PoolingLayer:
+            return create_pooling_layer(*polymorphic_downcast<PoolingLayerNode *>(node));
+        case NodeType::ReshapeLayer:
+            return create_reshape_layer(*polymorphic_downcast<ReshapeLayerNode *>(node));
+        case NodeType::SoftmaxLayer:
+            return create_softmax_layer(*polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);
+        default:
+            return nullptr;
+    }
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/NEON/NESubTensorHandle.cpp b/src/graph2/backends/NEON/NESubTensorHandle.cpp
new file mode 100644
index 0000000..9b3c9b1
--- /dev/null
+++ b/src/graph2/backends/NEON/NESubTensorHandle.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/NEON/NESubTensorHandle.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+NESubTensorHandle::NESubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords)
+    : _sub_tensor()
+{
+    ARM_COMPUTE_ERROR_ON(!parent_handle);
+    _sub_tensor = arm_compute::SubTensor(&parent_handle->tensor(), shape, coords);
+}
+
+void NESubTensorHandle::allocate()
+{
+    // noop
+}
+
+const arm_compute::ITensor &NESubTensorHandle::tensor() const
+{
+    return _sub_tensor;
+}
+
+arm_compute::ITensor &NESubTensorHandle::tensor()
+{
+    return _sub_tensor;
+}
+
+void NESubTensorHandle::map(bool blocking)
+{
+    ARM_COMPUTE_UNUSED(blocking);
+}
+
+void NESubTensorHandle::unmap()
+{
+    // noop
+}
+
+bool NESubTensorHandle::is_subtensor() const
+{
+    return true;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/NEON/NETensorHandle.cpp b/src/graph2/backends/NEON/NETensorHandle.cpp
new file mode 100644
index 0000000..a4af8aa
--- /dev/null
+++ b/src/graph2/backends/NEON/NETensorHandle.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/NEON/NETensorHandle.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+NETensorHandle::NETensorHandle(const ITensorInfo &info)
+    : _tensor()
+{
+    _tensor.allocator()->init(info);
+}
+
+void NETensorHandle::allocate()
+{
+    _tensor.allocator()->allocate();
+}
+
+const arm_compute::ITensor &NETensorHandle::tensor() const
+{
+    return _tensor;
+}
+
+arm_compute::ITensor &NETensorHandle::tensor()
+{
+    return _tensor;
+}
+
+void NETensorHandle::map(bool blocking)
+{
+    ARM_COMPUTE_UNUSED(blocking);
+}
+
+void NETensorHandle::unmap()
+{
+}
+
+bool NETensorHandle::is_subtensor() const
+{
+    return false;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/detail/ExecutionHelpers.cpp b/src/graph2/detail/ExecutionHelpers.cpp
new file mode 100644
index 0000000..a7eba0f
--- /dev/null
+++ b/src/graph2/detail/ExecutionHelpers.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/detail/ExecutionHelpers.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/graph2/GraphManager.h"
+#include "arm_compute/graph2/Tensor.h"
+#include "arm_compute/graph2/backends/BackendRegistry.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace detail
+{
+void default_initialize_backends()
+{
+    for(const auto &backend : backends::BackendRegistry::get().backends())
+    {
+        backend.second->initialize_backend();
+    }
+}
+
+void configure_all_tensors(Graph &g)
+{
+    auto &tensors = g.tensors();
+
+    for(auto &tensor : tensors)
+    {
+        if(tensor)
+        {
+            Target target  = tensor->desc().target;
+            auto   backend = backends::BackendRegistry::get().find_backend(target);
+            ARM_COMPUTE_ERROR_ON_MSG(!backend, "Requested backend doesn't exist!");
+            auto handle = backend->create_tensor(*tensor);
+            ARM_COMPUTE_ERROR_ON_MSG(!handle, "Couldn't create backend handle!");
+            tensor->set_handle(std::move(handle));
+        }
+    }
+}
+
+void allocate_all_tensors(Graph &g)
+{
+    auto &tensors = g.tensors();
+
+    for(auto &tensor : tensors)
+    {
+        if(tensor && !tensor->bound_edges().empty())
+        {
+            ARM_COMPUTE_ERROR_ON_MSG(!tensor->handle(), "Tensor handle is not configured!");
+            tensor->handle()->allocate();
+        }
+    }
+}
+
+ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx)
+{
+    ExecutionWorkload workload;
+    auto             &nodes = g.nodes();
+
+    // Create tasks
+    for(auto &node : nodes)
+    {
+        if(node != nullptr)
+        {
+            Target assigned_target = node->assigned_target();
+            auto   backend         = backends::BackendRegistry::get().find_backend(assigned_target);
+            ARM_COMPUTE_ERROR_ON_MSG(!backend, "Requested backend doesn't exist!");
+            auto func = backend->configure_node(*node, ctx);
+            if(func != nullptr)
+            {
+                ExecutionTask task;
+                task.task = std::move(func);
+                task.node = node.get();
+                workload.tasks.push_back(std::move(task));
+            }
+        }
+    }
+
+    // Add inputs and outputs
+    for(auto &node : nodes)
+    {
+        if(node != nullptr && node->type() == NodeType::Input)
+        {
+            workload.inputs.push_back(node->output(0));
+        }
+
+        if(node != nullptr && node->type() == NodeType::Output)
+        {
+            workload.outputs.push_back(node->input(0));
+            continue;
+        }
+    }
+
+    return workload;
+}
+
+void call_tensor_accessor(Tensor *tensor)
+{
+    ARM_COMPUTE_ERROR_ON(!tensor);
+    tensor->call_accessor();
+}
+
+void call_all_const_node_accessors(Graph &g)
+{
+    auto &nodes = g.nodes();
+
+    for(auto &node : nodes)
+    {
+        if(node != nullptr && node->type() == NodeType::Const)
+        {
+            call_tensor_accessor(node->output(0));
+        }
+    }
+}
+
+void call_all_input_node_accessors(ExecutionWorkload &workload)
+{
+    for(auto &input : workload.inputs)
+    {
+        if(input != nullptr)
+        {
+            input->call_accessor();
+        }
+    }
+}
+
+void call_all_tasks(ExecutionWorkload &workload)
+{
+    for(auto &task : workload.tasks)
+    {
+        task();
+    }
+}
+
+void call_all_output_node_accessors(ExecutionWorkload &workload)
+{
+    for(auto &output : workload.outputs)
+    {
+        if(output != nullptr)
+        {
+            output->call_accessor();
+        }
+    }
+}
+} // namespace detail
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/frontend/Stream.cpp b/src/graph2/frontend/Stream.cpp
new file mode 100644
index 0000000..076b9ac
--- /dev/null
+++ b/src/graph2/frontend/Stream.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/frontend/Stream.h"
+
+#include "arm_compute/graph2/Utils.h"
+#include "arm_compute/graph2/frontend/ILayer.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace frontend
+{
+Stream::Stream(size_t id, std::string name)
+    : _manager(), _ctx(), _g(id, std::move(name))
+{
+}
+
+void Stream::finalize(Target target, bool enable_tuning, bool enable_memory_management)
+{
+    PassManager pm = create_default_pass_manager();
+    _ctx.enable_tuning(enable_tuning);
+    _ctx.enable_memory_managenent(enable_memory_management);
+    _manager.finalize_graph(_g, _ctx, pm, target);
+}
+
+void Stream::run()
+{
+    _manager.execute_graph(_g);
+}
+
+void Stream::add_layer(ILayer &layer)
+{
+    auto nid   = layer.create_layer(*this);
+    _tail_node = nid;
+}
+
+const Graph &Stream::graph() const
+{
+    return _g;
+}
+
+Graph &Stream::graph()
+{
+    return _g;
+}
+} // namespace frontend
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/frontend/SubStream.cpp b/src/graph2/frontend/SubStream.cpp
new file mode 100644
index 0000000..e6fa605
--- /dev/null
+++ b/src/graph2/frontend/SubStream.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/frontend/SubStream.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/frontend/ILayer.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace frontend
+{
+SubStream::SubStream(IStream &s)
+    : _s(s)
+{
+    _hints     = s.hints();
+    _tail_node = s.tail_node();
+}
+
+void SubStream::add_layer(ILayer &layer)
+{
+    auto nid   = layer.create_layer(*this);
+    _tail_node = nid;
+}
+
+const Graph &SubStream::graph() const
+{
+    return _s.graph();
+}
+
+Graph &SubStream::graph()
+{
+    return _s.graph();
+}
+} // namespace frontend
+} // namespace graph2
+} // namespace arm_compute
diff --git a/src/graph2/mutators/DepthConcatSubTensorMutator.cpp b/src/graph2/mutators/DepthConcatSubTensorMutator.cpp
new file mode 100644
index 0000000..cc8de6b
--- /dev/null
+++ b/src/graph2/mutators/DepthConcatSubTensorMutator.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/mutators/DepthConcatSubTensorMutator.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/backends/BackendRegistry.h"
+#include "arm_compute/graph2/nodes/DepthConcatenateLayerNode.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+#include "arm_compute/core/utils/misc/Iterable.h"
+
+// NOTE(review): std::all_of is used below but <algorithm> is not included
+// directly — presumably pulled in transitively; confirm.
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Human-readable identifier of this mutator (used for logging/diagnostics).
+const char *DepthConcatSubTensorMutator::name()
+{
+    return "DepthConcatSubTensorMutator";
+}
+
+// Rewrites DepthConcatenateLayer nodes so their inputs become sub-tensors of
+// the concatenated output: each producer then writes directly into a z-offset
+// slice of the output buffer and the concat node itself is disabled.
+void DepthConcatSubTensorMutator::mutate(Graph &g)
+{
+    // Should be in reverse order of execution
+    for(auto &node : arm_compute::utils::iterable::reverse_iterate(g.nodes()))
+    {
+        if(node && node->type() == NodeType::DepthConcatenateLayer && node->output(0) != nullptr)
+        {
+            // Get output tensor
+            auto output_tensor = node->output(0);
+
+            // Check that all tensor have the same target and valid inputs
+            bool is_valid = std::all_of(node->input_edges().cbegin(), node->input_edges().cend(),
+                                        [&](const EdgeID & eid)
+            {
+                return (g.edge(eid) != nullptr) && (g.edge(eid)->tensor() != nullptr) && (g.edge(eid)->tensor()->desc().target == output_tensor->desc().target);
+            });
+
+            // Create subtensors
+            // Only rewrite when a backend is registered for the output target,
+            // since that backend must create the sub-tensor handles below.
+            if(is_valid && backends::BackendRegistry::get().find_backend(output_tensor->desc().target) != nullptr)
+            {
+                ARM_COMPUTE_LOG_GRAPH_VERBOSE("Using sub-tensors for the node with ID : "
+                                              << node->id() << " and name : " << node->name() << std::endl);
+                // Create sub-tensor handles
+                // 'depth' is the running z-offset of each input slice inside
+                // the concatenated output.  (NOTE(review): declared 'unsigned'
+                // while the rest of the file spells it 'unsigned int'.)
+                unsigned depth = 0;
+                for(unsigned int i = 0; i < node->input_edges().size(); ++i)
+                {
+                    auto       input_tensor = node->input(i);
+                    const auto input_shape  = input_tensor->desc().shape;
+
+                    auto backend = backends::BackendRegistry::get().find_backend(input_tensor->desc().target);
+                    auto handle  = backend->create_subtensor(output_tensor->handle(), input_shape, Coordinates(0, 0, depth));
+                    input_tensor->set_handle(std::move(handle));
+
+                    depth += input_shape.z();
+                }
+
+                // The concatenation is now a no-op at runtime; disable the node.
+                auto *dc_node = arm_compute::utils::cast::polymorphic_downcast<DepthConcatenateLayerNode *>(node.get());
+                dc_node->set_enabled(false);
+            }
+        }
+    }
+}
+} // namespace graph2
+} // namespace arm_compute
diff --git a/src/graph2/mutators/InPlaceOperationMutator.cpp b/src/graph2/mutators/InPlaceOperationMutator.cpp
new file mode 100644
index 0000000..bb13e98
--- /dev/null
+++ b/src/graph2/mutators/InPlaceOperationMutator.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/mutators/InPlaceOperationMutator.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/Logger.h"
+
+// NOTE(review): std::set is used below but <set> is not included directly —
+// presumably pulled in transitively; confirm.
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Human-readable identifier of this mutator (used for logging/diagnostics).
+const char *InPlaceOperationMutator::name()
+{
+    return "InPlaceOperationMutator";
+}
+
+// For node types that can compute in place (batch-norm, activation), reuses
+// the input tensor as the output tensor whenever the producer has exactly one
+// consumer, avoiding an intermediate buffer.
+void InPlaceOperationMutator::mutate(Graph &g)
+{
+    std::set<NodeType> in_place_nodes = { NodeType::BatchNormalizationLayer, NodeType::ActivationLayer };
+
+    // Not interested in the order of nodes
+    for(auto &node : g.nodes())
+    {
+        if(node && in_place_nodes.find(node->type()) != std::end(in_place_nodes))
+        {
+            // Get input edge
+            Edge *input_edge = node->input_edge(0);
+
+            // Check if parent has a single output if yes then force in place calculation else not
+            // (With more than one consumer, writing over the input would
+            //  corrupt the value the sibling consumers still need.)
+            if((input_edge != nullptr) && (input_edge->producer() != nullptr) && (input_edge->producer()->output_edges().size() == 1))
+            {
+                ARM_COMPUTE_LOG_GRAPH_VERBOSE("Switching to in-place computation for the node with ID : "
+                                              << node->id() << " and name : " << node->name() << std::endl);
+                // Update output
+                auto tensor = input_edge->tensor();
+                node->set_output_tensor(tensor->id(), 0);
+            }
+        }
+    }
+}
+} // namespace graph2
+} // namespace arm_compute
diff --git a/src/graph2/mutators/NodeFusionMutator.cpp b/src/graph2/mutators/NodeFusionMutator.cpp
new file mode 100644
index 0000000..d0ab3e7
--- /dev/null
+++ b/src/graph2/mutators/NodeFusionMutator.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/mutators/NodeFusionMutator.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/nodes/Nodes.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+
+// NOTE(review): std::vector is used below but <vector> is not included
+// directly — presumably pulled in transitively; confirm.
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace detail
+{
+// Fuses BatchNormalization -> Activation pairs into a single batch-norm node
+// carrying the activation info, then removes the activation node and rewires
+// its consumers to the batch-norm output.
+void fuse_batch_norm_with_activation(Graph &g)
+{
+    // Not interested in the order of nodes
+    for(auto &node : g.nodes())
+    {
+        // Check if the node is batch norm and not a branching node
+        // (a single output edge guarantees the activation is its only consumer)
+        if(node && node->type() == NodeType::BatchNormalizationLayer && node->output_edges().size() == 1)
+        {
+            auto output_edge_id = *node->output_edges().begin();
+            auto output_edge    = g.edge(output_edge_id);
+            // Check if following node is an activation layer node
+            if((output_edge != nullptr) && (output_edge->consumer() != nullptr) && (output_edge->consumer()->type() == NodeType::ActivationLayer))
+            {
+                ARM_COMPUTE_LOG_GRAPH_VERBOSE("Fusing Batch Normalization node with ID : " << output_edge->producer_id()
+                                              << " with Activation Layer node with ID : " << output_edge->consumer_id() << std::endl);
+
+                auto *bn_node  = arm_compute::utils::cast::polymorphic_downcast<BatchNormalizationLayerNode *>(output_edge->producer());
+                auto *act_node = arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(output_edge->consumer());
+
+                // Get driving nodes of activation node
+                // (saved before removal, since removing the node drops its edges)
+                std::vector<NodeIdxPair> act_driving_nodes;
+                for(auto &act_output_edge_id : act_node->output_edges())
+                {
+                    auto act_output_edge = g.edge(act_output_edge_id);
+                    if(act_output_edge != nullptr)
+                    {
+                        ARM_COMPUTE_ERROR_ON(act_output_edge->consumer() == nullptr);
+                        act_driving_nodes.push_back({ act_output_edge->consumer_id(), act_output_edge->consumer_idx() });
+                    }
+                }
+
+                // Set activation info to batch normalization
+                bn_node->set_fused_activation(act_node->activation_info());
+
+                // Remove activation node
+                g.remove_node(act_node->id());
+
+                // Update batch normalization node outputs
+                for(auto &driving_node : act_driving_nodes)
+                {
+                    g.add_connection(bn_node->id(), 0, driving_node.node_id, driving_node.index);
+                }
+            }
+        }
+    }
+}
+} // namespace detail
+
+// Human-readable identifier of this mutator (used for logging/diagnostics).
+const char *NodeFusionMutator::name()
+{
+    return "NodeFusionMutator";
+}
+
+// Entry point: currently the only fusion performed is batch-norm + activation.
+void NodeFusionMutator::mutate(Graph &g)
+{
+    detail::fuse_batch_norm_with_activation(g);
+}
+} // namespace graph2
+} // namespace arm_compute
diff --git a/src/graph2/nodes/ActivationLayerNode.cpp b/src/graph2/nodes/ActivationLayerNode.cpp
new file mode 100644
index 0000000..c7c36e9
--- /dev/null
+++ b/src/graph2/nodes/ActivationLayerNode.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/ActivationLayerNode.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Element-wise activation node: one input edge, one output tensor; it simply
+// carries the activation parameters.
+ActivationLayerNode::ActivationLayerNode(ActivationLayerInfo info)
+    : _info(info)
+{
+    _input_edges.resize(1, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+// Accessor for the activation parameters this node applies.
+ActivationLayerInfo ActivationLayerNode::activation_info() const
+{
+    return _info;
+}
+
+// Propagates the input descriptor to the output tensor.  Returns false when
+// either side is not yet connected, so the caller can retry after wiring.
+bool ActivationLayerNode::forward_descriptors()
+{
+    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Output descriptor equals the input descriptor: activation is shape- and
+// type-preserving.
+TensorDescriptor ActivationLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+    const Tensor *src = input(0);
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+    return src->desc();
+}
+
+// No node-level validation performed yet; always reports success.
+Status ActivationLayerNode::validate()
+{
+    return Status{};
+}
+
+NodeType ActivationLayerNode::type() const
+{
+    return NodeType::ActivationLayer;
+}
+
+// Visitor dispatch (double-dispatch entry point).
+void ActivationLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/BatchNormalizationLayerNode.cpp b/src/graph2/nodes/BatchNormalizationLayerNode.cpp
new file mode 100644
index 0000000..b9f6342
--- /dev/null
+++ b/src/graph2/nodes/BatchNormalizationLayerNode.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/BatchNormalizationLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Batch-normalization node: five input edges (presumably input, mean,
+// variance and the two affine parameters — confirm against the backend
+// function setup), one output tensor.  An activation can be fused in later
+// by NodeFusionMutator via set_fused_activation().
+BatchNormalizationLayerNode::BatchNormalizationLayerNode(float epsilon, ActivationLayerInfo fused_activation)
+    : _epsilon(epsilon), _fused_activation(fused_activation)
+{
+    _input_edges.resize(5, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+// Numerical-stability constant added to the variance.
+float BatchNormalizationLayerNode::epsilon() const
+{
+    return _epsilon;
+}
+
+// Activation applied after normalization, if any was fused.
+ActivationLayerInfo BatchNormalizationLayerNode::fused_activation() const
+{
+    return _fused_activation;
+}
+
+// Records an activation to execute as part of this node (set by the fusion
+// mutator when it absorbs a following activation layer).
+void BatchNormalizationLayerNode::set_fused_activation(ActivationLayerInfo fused_activation)
+{
+    _fused_activation = fused_activation;
+}
+
+// Propagates the input descriptor to the output tensor.  Returns false when
+// either side is not yet connected, so the caller can retry after wiring.
+bool BatchNormalizationLayerNode::forward_descriptors()
+{
+    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Output descriptor equals the input descriptor: batch-norm is shape- and
+// type-preserving.
+TensorDescriptor BatchNormalizationLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+    const Tensor *src = input(0);
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+    return src->desc();
+}
+
+// No node-level validation performed yet; always reports success.
+Status BatchNormalizationLayerNode::validate()
+{
+    return Status{};
+}
+
+NodeType BatchNormalizationLayerNode::type() const
+{
+    return NodeType::BatchNormalizationLayer;
+}
+
+// Visitor dispatch (double-dispatch entry point).
+void BatchNormalizationLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/ConstNode.cpp b/src/graph2/nodes/ConstNode.cpp
new file mode 100644
index 0000000..5bd6a81
--- /dev/null
+++ b/src/graph2/nodes/ConstNode.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/ConstNode.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Source node for constant data (e.g. weights/biases): no input edges, a
+// single output whose descriptor is fixed at construction time.
+ConstNode::ConstNode(TensorDescriptor desc)
+    : _desc(desc)
+{
+    _outputs.resize(1, NullTensorID);
+}
+
+// Copies the stored descriptor onto the output tensor.  Returns false when
+// the output is not yet connected, so the caller can retry after wiring.
+bool ConstNode::forward_descriptors()
+{
+    if(output_id(0) != NullTensorID)
+    {
+        Tensor *t = output(0);
+        ARM_COMPUTE_ERROR_ON(t == nullptr);
+        t->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Output descriptor is exactly the one supplied at construction.
+TensorDescriptor ConstNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    return _desc;
+}
+
+// No node-level validation performed yet; always reports success.
+Status ConstNode::validate()
+{
+    return Status{};
+}
+
+NodeType ConstNode::type() const
+{
+    return NodeType::Const;
+}
+
+// Visitor dispatch (double-dispatch entry point).
+void ConstNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
diff --git a/src/graph2/nodes/ConvolutionLayerNode.cpp b/src/graph2/nodes/ConvolutionLayerNode.cpp
new file mode 100644
index 0000000..499b352
--- /dev/null
+++ b/src/graph2/nodes/ConvolutionLayerNode.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/ConvolutionLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Convolution node: three input edges (input, weights, and what appears to
+// be an optional bias — confirm against the backend function setup), one
+// output.  'method' selects the backend algorithm (e.g. direct/GEMM).
+ConvolutionLayerNode::ConvolutionLayerNode(PadStrideInfo info, ConvolutionMethod method)
+    : _info(std::move(info)), _method(method)
+{
+    _input_edges.resize(3, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+// Overrides the convolution algorithm (e.g. chosen by a mutator or backend).
+void ConvolutionLayerNode::set_convolution_method(ConvolutionMethod method)
+{
+    _method = method;
+}
+
+ConvolutionMethod ConvolutionLayerNode::convolution_method() const
+{
+    return _method;
+}
+
+// Pad/stride configuration of the convolution.
+PadStrideInfo ConvolutionLayerNode::convolution_info() const
+{
+    return _info;
+}
+
+// Static shape inference: spatial dims come from scaled_dimensions() (which
+// accounts for kernel size, stride and padding); the channel count comes
+// from the number of kernels, weights_shape[3].
+TensorShape ConvolutionLayerNode::compute_output_shape(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo info)
+{
+    unsigned int output_width  = 0;
+    unsigned int output_height = 0;
+    std::tie(output_width, output_height) = scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), info);
+
+    TensorShape output_shape{ input_shape };
+    output_shape.set(0, output_width);
+    output_shape.set(1, output_height);
+    output_shape.set(2, weights_shape[3]);
+
+    return output_shape;
+}
+
+// Propagates descriptors to the output once input, weights and output are
+// connected (input edge 2 — the bias — is not required here).
+bool ConvolutionLayerNode::forward_descriptors()
+{
+    if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Output descriptor: input descriptor with the shape replaced by the
+// statically-inferred convolution output shape.
+TensorDescriptor ConvolutionLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    const Tensor *src     = input(0);
+    const Tensor *weights = input(1);
+
+    ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
+
+    TensorDescriptor output_info  = src->desc();
+    TensorShape      output_shape = compute_output_shape(src->desc().shape, weights->desc().shape, _info);
+    output_info.shape             = output_shape;
+    return output_info;
+}
+
+// No node-level validation performed yet; always reports success.
+Status ConvolutionLayerNode::validate()
+{
+    return Status{};
+}
+
+NodeType ConvolutionLayerNode::type() const
+{
+    return NodeType::ConvolutionLayer;
+}
+
+// Visitor dispatch (double-dispatch entry point).
+void ConvolutionLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/DepthConcatenateLayerNode.cpp b/src/graph2/nodes/DepthConcatenateLayerNode.cpp
new file mode 100644
index 0000000..dcd6651
--- /dev/null
+++ b/src/graph2/nodes/DepthConcatenateLayerNode.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/DepthConcatenateLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+// NOTE(review): std::all_of is used below but <algorithm> is not included
+// directly — presumably pulled in transitively; confirm.
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Concatenates 'total_nodes' inputs along the depth (z) axis.  Starts
+// enabled; DepthConcatSubTensorMutator may later disable it when the concat
+// is rewritten as sub-tensor views.
+DepthConcatenateLayerNode::DepthConcatenateLayerNode(unsigned int total_nodes)
+    : _total_nodes(total_nodes), _is_enabled(true)
+{
+    _input_edges.resize(total_nodes, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+// Enables/disables execution of this node (disabled when inputs write
+// directly into sub-tensors of the output).
+void DepthConcatenateLayerNode::set_enabled(bool is_enabled)
+{
+    _is_enabled = is_enabled;
+}
+
+bool DepthConcatenateLayerNode::is_enabled() const
+{
+    return _is_enabled;
+}
+
+// Static shape inference: width/height are the max across inputs, depth is
+// the sum of input depths; remaining dimensions come from the first input.
+TensorShape DepthConcatenateLayerNode::compute_output_shape(const std::vector<TensorShape> &input_shapes)
+{
+    ARM_COMPUTE_ERROR_ON(input_shapes.size() == 0);
+
+    TensorShape output_shape = input_shapes[0];
+
+    size_t max_x = 0;
+    size_t max_y = 0;
+    size_t depth = 0;
+
+    for(const auto &shape : input_shapes)
+    {
+        max_x = std::max(shape.x(), max_x);
+        max_y = std::max(shape.y(), max_y);
+        depth += shape.z();
+    }
+
+    output_shape.set(0, max_x);
+    output_shape.set(1, max_y);
+    output_shape.set(2, depth);
+
+    return output_shape;
+}
+
+// Propagates descriptors to the output.  Only the output connection is
+// checked here; configure_output() itself handles not-yet-connected inputs
+// by returning an empty descriptor.
+bool DepthConcatenateLayerNode::forward_descriptors()
+{
+    if(_outputs[0] != NullTensorID)
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Output descriptor: first input's descriptor with the concatenated shape.
+// Returns a default-constructed descriptor while any input edge is still
+// unconnected, since the full shape cannot be computed yet.
+TensorDescriptor DepthConcatenateLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+    // Check if all input tensors are set
+    bool are_all_inputs_set = std::all_of(std::begin(_input_edges), std::end(_input_edges), [](const EdgeID & eid)
+    {
+        return eid != EmptyEdgeID;
+    });
+
+    TensorDescriptor output_info = {};
+
+    if(are_all_inputs_set)
+    {
+        std::vector<TensorShape> inputs_shapes;
+        for(unsigned int i = 0; i < _input_edges.size(); ++i)
+        {
+            const Tensor *t = _graph->tensor(input_id(i));
+            ARM_COMPUTE_ERROR_ON(t == nullptr);
+            inputs_shapes.push_back(t->desc().shape);
+        }
+        output_info              = input(0)->desc();
+        TensorShape output_shape = compute_output_shape(inputs_shapes);
+        output_info.shape        = output_shape;
+    }
+
+    return output_info;
+}
+
+// No node-level validation performed yet; always reports success.
+// (_total_nodes is referenced only to silence the unused-member warning.)
+Status DepthConcatenateLayerNode::validate()
+{
+    ARM_COMPUTE_UNUSED(_total_nodes);
+    return Status{};
+}
+
+NodeType DepthConcatenateLayerNode::type() const
+{
+    return NodeType::DepthConcatenateLayer;
+}
+
+// Visitor dispatch (double-dispatch entry point).
+void DepthConcatenateLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/DepthwiseConvolutionLayerNode.cpp b/src/graph2/nodes/DepthwiseConvolutionLayerNode.cpp
new file mode 100644
index 0000000..b030e8b
--- /dev/null
+++ b/src/graph2/nodes/DepthwiseConvolutionLayerNode.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/DepthwiseConvolutionLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Depthwise convolution node: three input edges (input, weights, and what
+// appears to be an optional bias — confirm against the backend function
+// setup), one output.  'method' selects the backend algorithm.
+DepthwiseConvolutionLayerNode::DepthwiseConvolutionLayerNode(PadStrideInfo info, DepthwiseConvolutionMethod method)
+    : _info(std::move(info)), _method(method)
+{
+    _input_edges.resize(3, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+// Overrides the depthwise algorithm (e.g. chosen by a mutator or backend).
+void DepthwiseConvolutionLayerNode::set_depthwise_convolution_method(DepthwiseConvolutionMethod method)
+{
+    _method = method;
+}
+
+DepthwiseConvolutionMethod DepthwiseConvolutionLayerNode::depthwise_convolution_method() const
+{
+    return _method;
+}
+
+// Pad/stride configuration of the convolution.
+PadStrideInfo DepthwiseConvolutionLayerNode::convolution_info() const
+{
+    return _info;
+}
+
+// Static shape inference: spatial dims come from scaled_dimensions(); the
+// depth is left as the input's depth (no depth-multiplier handling here —
+// TODO confirm that is intentional).
+TensorShape DepthwiseConvolutionLayerNode::compute_output_shape(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo info)
+{
+    unsigned int output_width  = 0;
+    unsigned int output_height = 0;
+    std::tie(output_width, output_height) = scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), info);
+
+    TensorShape output_shape{ input_shape };
+    output_shape.set(0, output_width);
+    output_shape.set(1, output_height);
+
+    return output_shape;
+}
+
+// Propagates descriptors to the output once input, weights and output are
+// connected (input edge 2 — the bias — is not required here).
+bool DepthwiseConvolutionLayerNode::forward_descriptors()
+{
+    if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Output descriptor: input descriptor with the shape replaced by the
+// statically-inferred depthwise output shape.
+TensorDescriptor DepthwiseConvolutionLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    const Tensor *src     = input(0);
+    const Tensor *weights = input(1);
+
+    ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
+
+    TensorDescriptor output_info  = src->desc();
+    TensorShape      output_shape = compute_output_shape(src->desc().shape, weights->desc().shape, _info);
+    output_info.shape             = output_shape;
+    return output_info;
+}
+
+// No node-level validation performed yet; always reports success.
+Status DepthwiseConvolutionLayerNode::validate()
+{
+    return Status{};
+}
+
+NodeType DepthwiseConvolutionLayerNode::type() const
+{
+    return NodeType::DepthwiseConvolutionLayer;
+}
+
+// Visitor dispatch (double-dispatch entry point).
+void DepthwiseConvolutionLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/EltwiseLayerNode.cpp b/src/graph2/nodes/EltwiseLayerNode.cpp
new file mode 100644
index 0000000..149d926
--- /dev/null
+++ b/src/graph2/nodes/EltwiseLayerNode.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/EltwiseLayerNode.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Element-wise node: two input edges (the operands) and a single output.
+EltwiseLayerNode::EltwiseLayerNode(EltwiseOperation op)
+    : _op(op)
+{
+    _input_edges.resize(2, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+// Accessor for the element-wise operation this node performs.
+EltwiseOperation EltwiseLayerNode::eltwise_operation() const
+{
+    return _op;
+}
+
+// Forwards the output descriptor once all tensors are connected.
+// The node declares two input edges (see constructor), so both operands must
+// be present before forwarding — mirrors DepthwiseConvolutionLayerNode, which
+// checks every declared input. Previously only input 0 was validated.
+bool EltwiseLayerNode::forward_descriptors()
+{
+    if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Output descriptor is a straight copy of the first operand's descriptor
+// (element-wise ops preserve shape and type; _op does not affect it here).
+TensorDescriptor EltwiseLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    ARM_COMPUTE_UNUSED(_op);
+
+    const Tensor *src = input(0);
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+    return src->desc();
+}
+
+// No node-level validation performed; always reports success.
+Status EltwiseLayerNode::validate()
+{
+    return Status{};
+}
+
+// Identifies this node in the graph IR.
+NodeType EltwiseLayerNode::type() const
+{
+    return NodeType::EltwiseLayer;
+}
+
+// Visitor-pattern entry point: dispatches to the visitor's overload for this node type.
+void EltwiseLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
diff --git a/src/graph2/nodes/FlattenLayerNode.cpp b/src/graph2/nodes/FlattenLayerNode.cpp
new file mode 100644
index 0000000..7c4059f
--- /dev/null
+++ b/src/graph2/nodes/FlattenLayerNode.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/FlattenLayerNode.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Flatten node: a single input edge and a single output.
+FlattenLayerNode::FlattenLayerNode()
+{
+    _input_edges.resize(1, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+// Forwards the computed descriptor to the output tensor once both the input
+// and the output are connected; returns false otherwise.
+bool FlattenLayerNode::forward_descriptors()
+{
+    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Output descriptor is the input descriptor with all dimensions collapsed
+// into one (collapse over the full rank of the input shape).
+TensorDescriptor FlattenLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+    const Tensor *src = input(0);
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+    TensorDescriptor output_desc = src->desc();
+    output_desc.shape.collapse(src->desc().shape.num_dimensions());
+
+    return output_desc;
+}
+
+// No node-level validation performed; always reports success.
+Status FlattenLayerNode::validate()
+{
+    return Status{};
+}
+
+// Identifies this node in the graph IR.
+NodeType FlattenLayerNode::type() const
+{
+    return NodeType::FlattenLayer;
+}
+
+// Visitor-pattern entry point: dispatches to the visitor's overload for this node type.
+void FlattenLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/FullyConnectedLayer.cpp b/src/graph2/nodes/FullyConnectedLayer.cpp
new file mode 100644
index 0000000..195adc4
--- /dev/null
+++ b/src/graph2/nodes/FullyConnectedLayer.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/FullyConnectedLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Fully-connected node: three input edges (data, weights, bias) and one output.
+FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs)
+    : _num_outputs(num_outputs)
+{
+    _input_edges.resize(3, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+// Computes the 2D weights shape (num_weights, num_outputs): num_weights is the
+// product of all non-batch input dimensions. Inputs of rank 2 or 4 are treated
+// as batched, so their last dimension is excluded from the product.
+TensorShape FullyConnectedLayerNode::compute_weights_shape(TensorShape input_shape, unsigned int num_outputs)
+{
+    unsigned int num_weights    = 1;
+    unsigned int num_dimensions = input_shape.num_dimensions();
+    // Ignore the batch dimension if there is one:
+    if(num_dimensions == 2 || num_dimensions == 4)
+    {
+        num_dimensions--;
+    }
+    for(unsigned int i = 0; i < num_dimensions; i++)
+    {
+        num_weights *= input_shape[i];
+    }
+    return TensorShape(num_weights, num_outputs);
+}
+
+// Computes the 2D output shape (num_outputs, batches). The batch count is read
+// from dimension 1 for rank-2 inputs and dimension 3 for higher-rank inputs.
+TensorShape FullyConnectedLayerNode::compute_output_shape(TensorShape input_shape, unsigned int num_outputs)
+{
+    // Note: Only 1D batch space is supported at the moment
+    unsigned int batches = input_shape[1];
+    if(input_shape.num_dimensions() > 2)
+    {
+        batches = input_shape[3];
+    }
+    return TensorShape(num_outputs, batches);
+}
+
+// Forwards the computed descriptor to the output tensor once the data input
+// and the output are connected; returns false otherwise.
+bool FullyConnectedLayerNode::forward_descriptors()
+{
+    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Builds the output descriptor from the input descriptor, replacing only the
+// shape with (num_outputs, batches) derived from the input shape.
+TensorDescriptor FullyConnectedLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    const Tensor *src = input(0);
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+    TensorDescriptor output_info  = src->desc();
+    TensorShape      output_shape = compute_output_shape(src->desc().shape, _num_outputs);
+    output_info.shape             = output_shape;
+    return output_info;
+}
+
+// No node-level validation performed; always reports success.
+Status FullyConnectedLayerNode::validate()
+{
+    return Status{};
+}
+
+// Identifies this node in the graph IR.
+NodeType FullyConnectedLayerNode::type() const
+{
+    return NodeType::FullyConnectedLayer;
+}
+
+// Visitor-pattern entry point: dispatches to the visitor's overload for this node type.
+void FullyConnectedLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/InputNode.cpp b/src/graph2/nodes/InputNode.cpp
new file mode 100644
index 0000000..84cce2a
--- /dev/null
+++ b/src/graph2/nodes/InputNode.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/InputNode.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Graph entry point: no input edges, one output whose descriptor is fixed at
+// construction time.
+InputNode::InputNode(TensorDescriptor desc)
+    : _desc(desc)
+{
+    _outputs.resize(1, NullTensorID);
+}
+
+// Assigns the stored descriptor to the output tensor once it is connected;
+// returns false otherwise. (No inputs to wait for — this is a source node.)
+bool InputNode::forward_descriptors()
+{
+    if(output_id(0) != NullTensorID)
+    {
+        Tensor *t = output(0);
+        ARM_COMPUTE_ERROR_ON(t == nullptr);
+        t->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// The output descriptor is exactly the one provided at construction.
+TensorDescriptor InputNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    return _desc;
+}
+
+// No node-level validation performed; always reports success.
+Status InputNode::validate()
+{
+    return Status{};
+}
+
+// Identifies this node in the graph IR.
+NodeType InputNode::type() const
+{
+    return NodeType::Input;
+}
+
+// Visitor-pattern entry point: dispatches to the visitor's overload for this node type.
+void InputNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
diff --git a/src/graph2/nodes/NormalizationLayerNode.cpp b/src/graph2/nodes/NormalizationLayerNode.cpp
new file mode 100644
index 0000000..a394879
--- /dev/null
+++ b/src/graph2/nodes/NormalizationLayerNode.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/NormalizationLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Normalization node: a single input edge and a single output.
+NormalizationLayerNode::NormalizationLayerNode(NormalizationLayerInfo norm_info)
+    : _info(norm_info)
+{
+    _input_edges.resize(1, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+// Accessor for the normalization configuration this node was constructed with.
+NormalizationLayerInfo NormalizationLayerNode::normalization_info() const
+{
+    return _info;
+}
+
+// Forwards the computed descriptor to the output tensor once both the input
+// and the output are connected; returns false otherwise.
+// Parenthesization normalized to match the identical condition in the other
+// node classes (e.g. FlattenLayerNode, PoolingLayerNode) for consistency.
+bool NormalizationLayerNode::forward_descriptors()
+{
+    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Output descriptor is a straight copy of the input descriptor
+// (normalization preserves shape and type).
+TensorDescriptor NormalizationLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+    const Tensor *src = input(0);
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+    return src->desc();
+}
+
+// No node-level validation performed; always reports success.
+Status NormalizationLayerNode::validate()
+{
+    return Status{};
+}
+
+// Identifies this node in the graph IR.
+NodeType NormalizationLayerNode::type() const
+{
+    return NodeType::NormalizationLayer;
+}
+
+// Visitor-pattern entry point: dispatches to the visitor's overload for this node type.
+void NormalizationLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/OutputNode.cpp b/src/graph2/nodes/OutputNode.cpp
new file mode 100644
index 0000000..1daebb1
--- /dev/null
+++ b/src/graph2/nodes/OutputNode.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/OutputNode.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/graph2/Tensor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Graph exit point: a single input edge and no outputs.
+OutputNode::OutputNode()
+{
+    _input_edges.resize(1, EmptyEdgeID);
+}
+
+// Sink node has no output tensors, so there is nothing to forward; always succeeds.
+bool OutputNode::forward_descriptors()
+{
+    return true;
+}
+
+// Sink node produces no output; returns a default (empty) descriptor.
+TensorDescriptor OutputNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    return TensorDescriptor();
+}
+
+// No node-level validation performed; always reports success.
+Status OutputNode::validate()
+{
+    return Status{};
+}
+
+// Identifies this node in the graph IR.
+NodeType OutputNode::type() const
+{
+    return NodeType::Output;
+}
+
+// Visitor-pattern entry point: dispatches to the visitor's overload for this node type.
+void OutputNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
diff --git a/src/graph2/nodes/PoolingLayerNode.cpp b/src/graph2/nodes/PoolingLayerNode.cpp
new file mode 100644
index 0000000..2c2cf53
--- /dev/null
+++ b/src/graph2/nodes/PoolingLayerNode.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/PoolingLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Pooling node: a single input edge and a single output.
+PoolingLayerNode::PoolingLayerNode(PoolingLayerInfo pool_info)
+    : _info(std::move(pool_info))
+{
+    _input_edges.resize(1, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+// Accessor for the pooling configuration this node was constructed with.
+PoolingLayerInfo PoolingLayerNode::pooling_info() const
+{
+    return _info;
+}
+
+// Computes the pooled output shape. For global pooling the window covers the
+// whole input plane; otherwise the configured pool size is used. Width/height
+// come from scaled_dimensions(); the other dimensions are copied unchanged.
+TensorShape PoolingLayerNode::compute_output_shape(TensorShape input_shape, PoolingLayerInfo info)
+{
+    const int pool_size_x = info.is_global_pooling() ? input_shape.x() : info.pool_size().width;
+    const int pool_size_y = info.is_global_pooling() ? input_shape.y() : info.pool_size().height;
+
+    unsigned int pooled_width  = 0;
+    unsigned int pooled_height = 0;
+    std::tie(pooled_width, pooled_height) = scaled_dimensions(input_shape.x(), input_shape.y(), pool_size_x, pool_size_y, info.pad_stride_info());
+
+    TensorShape output_shape{ input_shape };
+    output_shape.set(0, pooled_width);
+    output_shape.set(1, pooled_height);
+
+    return output_shape;
+}
+
+// Forwards the computed descriptor to the output tensor once both the input
+// and the output are connected; returns false otherwise.
+bool PoolingLayerNode::forward_descriptors()
+{
+    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Builds the output descriptor from the input descriptor, replacing only the
+// shape with the pooled shape derived from the pooling configuration.
+TensorDescriptor PoolingLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+    const Tensor *src = input(0);
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+    TensorDescriptor output_info  = src->desc();
+    TensorShape      output_shape = compute_output_shape(src->desc().shape, _info);
+    output_info.shape             = output_shape;
+    return output_info;
+}
+
+// No node-level validation performed; always reports success.
+Status PoolingLayerNode::validate()
+{
+    return Status{};
+}
+
+// Identifies this node in the graph IR.
+NodeType PoolingLayerNode::type() const
+{
+    return NodeType::PoolingLayer;
+}
+
+// Visitor-pattern entry point: dispatches to the visitor's overload for this node type.
+void PoolingLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/ReshapeLayer.cpp b/src/graph2/nodes/ReshapeLayer.cpp
new file mode 100644
index 0000000..6280eea
--- /dev/null
+++ b/src/graph2/nodes/ReshapeLayer.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/ReshapeLayerNode.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Reshape node: a single input edge, a single output, and the target shape.
+ReshapeLayerNode::ReshapeLayerNode(TensorShape shape)
+    : _shape(shape)
+{
+    _input_edges.resize(1, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+// Forwards the computed descriptor to the output tensor once both the input
+// and the output are connected; returns false otherwise.
+bool ReshapeLayerNode::forward_descriptors()
+{
+    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Output descriptor is the input descriptor with the shape replaced by the
+// target shape given at construction.
+TensorDescriptor ReshapeLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+    const Tensor *src = input(0);
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+    TensorDescriptor output_desc = src->desc();
+    output_desc.shape            = _shape;
+
+    return output_desc;
+}
+
+// No node-level validation performed; always reports success.
+Status ReshapeLayerNode::validate()
+{
+    return Status{};
+}
+
+// Identifies this node in the graph IR.
+NodeType ReshapeLayerNode::type() const
+{
+    return NodeType::ReshapeLayer;
+}
+
+// Visitor-pattern entry point: dispatches to the visitor's overload for this node type.
+void ReshapeLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/nodes/SoftmaxLayerNode.cpp b/src/graph2/nodes/SoftmaxLayerNode.cpp
new file mode 100644
index 0000000..83bc978
--- /dev/null
+++ b/src/graph2/nodes/SoftmaxLayerNode.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/nodes/SoftmaxLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/INodeVisitor.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Softmax node: a single input edge, a single output, and a beta scaling factor.
+SoftmaxLayerNode::SoftmaxLayerNode(float beta)
+    : _beta(beta)
+{
+    _input_edges.resize(1, EmptyEdgeID);
+    _outputs.resize(1, NullTensorID);
+}
+
+// Accessor for the beta scaling factor this node was constructed with.
+float SoftmaxLayerNode::beta() const
+{
+    return _beta;
+}
+
+// Forwards the computed descriptor to the output tensor once both the input
+// and the output are connected; returns false otherwise.
+bool SoftmaxLayerNode::forward_descriptors()
+{
+    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+    {
+        Tensor *dst = output(0);
+        ARM_COMPUTE_ERROR_ON(dst == nullptr);
+        dst->desc() = configure_output(0);
+        return true;
+    }
+    return false;
+}
+
+// Output descriptor is a straight copy of the input descriptor
+// (softmax preserves shape and type).
+TensorDescriptor SoftmaxLayerNode::configure_output(size_t idx) const
+{
+    ARM_COMPUTE_UNUSED(idx);
+    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+    const Tensor *src = input(0);
+    ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+    return src->desc();
+}
+
+// No node-level validation performed; always reports success.
+Status SoftmaxLayerNode::validate()
+{
+    return Status{};
+}
+
+// Identifies this node in the graph IR.
+NodeType SoftmaxLayerNode::type() const
+{
+    return NodeType::SoftmaxLayer;
+}
+
+// Visitor-pattern entry point: dispatches to the visitor's overload for this node type.
+void SoftmaxLayerNode::accept(INodeVisitor &v)
+{
+    v.visit(*this);
+}
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/printers/DotGraphPrinter.cpp b/src/graph2/printers/DotGraphPrinter.cpp
new file mode 100644
index 0000000..04987ee
--- /dev/null
+++ b/src/graph2/printers/DotGraphPrinter.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/printers/DotGraphPrinter.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/Tensor.h"
+#include "arm_compute/graph2/TypePrinter.h"
+#include "arm_compute/graph2/nodes/Nodes.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Label info for activation nodes: the activation function name.
+void DotGraphVisitor::visit(ActivationLayerNode &n)
+{
+    std::stringstream ss;
+    ss << n.activation_info().activation();
+    _info = ss.str();
+}
+
+// Label info for batch-norm nodes: the fused activation name, or empty if none.
+void DotGraphVisitor::visit(BatchNormalizationLayerNode &n)
+{
+    std::stringstream ss;
+    ss << (n.fused_activation().enabled() ? to_string(n.fused_activation().activation()) : "");
+    _info = ss.str();
+}
+
+// Label info for convolution nodes: the selected convolution method.
+void DotGraphVisitor::visit(ConvolutionLayerNode &n)
+{
+    std::stringstream ss;
+    ss << n.convolution_method();
+    _info = ss.str();
+}
+
+// Label info for depth-concatenate nodes: whether the node is enabled.
+void DotGraphVisitor::visit(DepthConcatenateLayerNode &n)
+{
+    std::stringstream ss;
+    ss << "Enabled: " << n.is_enabled();
+    _info = ss.str();
+}
+
+// Label info for depthwise-convolution nodes: the selected method.
+void DotGraphVisitor::visit(DepthwiseConvolutionLayerNode &n)
+{
+    std::stringstream ss;
+    ss << n.depthwise_convolution_method();
+    _info = ss.str();
+}
+
+// Label info for element-wise nodes: the operation performed.
+void DotGraphVisitor::visit(EltwiseLayerNode &n)
+{
+    std::stringstream ss;
+    ss << n.eltwise_operation();
+    _info = ss.str();
+}
+
+// Label info for normalization nodes: the normalization type.
+void DotGraphVisitor::visit(NormalizationLayerNode &n)
+{
+    std::stringstream ss;
+    ss << n.normalization_info().type();
+    _info = ss.str();
+}
+
+// Label info for pooling nodes: type, window size and pad/stride, one per
+// DOT label line (the literal "\n" sequence is a line break inside DOT labels).
+void DotGraphVisitor::visit(PoolingLayerNode &n)
+{
+    std::stringstream ss;
+    ss << n.pooling_info().pool_type();
+    ss << R"( \n )";
+    ss << n.pooling_info().pool_size();
+    ss << R"( \n )";
+    ss << n.pooling_info().pad_stride_info();
+    _info = ss.str();
+}
+
+// Fallback for node types without a dedicated overload: no extra label info.
+void DotGraphVisitor::default_visit()
+{
+    _info.clear();
+}
+
+// Returns the label info collected by the most recent visit.
+const std::string &DotGraphVisitor::info() const
+{
+    return _info;
+}
+
+// Serializes the whole graph in DOT format: header, node statements,
+// edge statements, footer.
+void DotGraphPrinter::print(const Graph &g, std::ostream &os)
+{
+    // Print header
+    print_header(g, os);
+
+    // Print nodes
+    print_nodes(g, os);
+
+    // Print edges
+    print_edges(g, os);
+
+    // Print footer
+    print_footer(g, os);
+}
+
+// Opens the DOT digraph, falling back to "Graph" when the graph is unnamed.
+void DotGraphPrinter::print_header(const Graph &g, std::ostream &os)
+{
+    // Print graph name
+    std::string graph_name = (g.name().empty()) ? "Graph" : g.name();
+    os << "digraph " << graph_name << "{\n";
+}
+
+// Closes the DOT digraph opened by print_header.
+void DotGraphPrinter::print_footer(const Graph &g, std::ostream &os)
+{
+    ARM_COMPUTE_UNUSED(g);
+    os << "}\n";
+}
+
+// Emits one DOT node statement per (non-null) graph node. The node id is
+// "n<id>"; the label combines the node name (or id), its assigned target,
+// and the per-type description gathered via the visitor.
+void DotGraphPrinter::print_nodes(const Graph &g, std::ostream &os)
+{
+    for(const auto &n : g.nodes())
+    {
+        if(n)
+        {
+            // Output node id
+            std::string node_id = std::string("n") + support::cpp11::to_string(n->id());
+            os << node_id << " ";
+
+            // Output label
+            n->accept(_dot_node_visitor);
+
+            std::string name             = n->name().empty() ? node_id : n->name();
+            auto        node_description = _dot_node_visitor.info();
+
+            os << R"([label = ")" << name << R"( \n )" << n->assigned_target() << R"( \n )" << node_description << R"("])";
+            os << ";\n";
+        }
+    }
+}
+
+// Emits one DOT edge statement per (non-null) graph edge, producer -> consumer,
+// labeled with the shape and data type of the tensor carried by the edge.
+void DotGraphPrinter::print_edges(const Graph &g, std::ostream &os)
+{
+    for(const auto &e : g.edges())
+    {
+        if(e)
+        {
+            std::string source_node_id = std::string("n") + support::cpp11::to_string(e->producer_id());
+            std::string sink_node_id   = std::string("n") + support::cpp11::to_string(e->consumer_id());
+            os << source_node_id << " -> " << sink_node_id << " ";
+            const Tensor *t = e->tensor();
+            ARM_COMPUTE_ERROR_ON(t == nullptr);
+            os << R"([label = ")" << t->desc().shape << R"( \n )" << t->desc().data_type << R"("])";
+            os << ";\n";
+        }
+    }
+}
+} // namespace graph2
+} // namespace arm_compute
diff --git a/src/runtime/CL/CLSubTensor.cpp b/src/runtime/CL/CLSubTensor.cpp
index 5f58024..d0e7d76 100644
--- a/src/runtime/CL/CLSubTensor.cpp
+++ b/src/runtime/CL/CLSubTensor.cpp
@@ -29,6 +29,11 @@
 
 using namespace arm_compute;
 
+// Default constructor: creates an empty sub-tensor with no parent.
+CLSubTensor::CLSubTensor()
+    : _parent(nullptr), _info()
+{
+}
+
 CLSubTensor::CLSubTensor(ICLTensor *parent, const TensorShape &tensor_shape, const Coordinates &coords, bool extend_parent)
     : _parent(nullptr), _info()
 {
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index e6f8f26..c1926a7 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -138,8 +138,10 @@
 
         // Manage intermediate buffers
         _memory_group.manage(&_tmp_a);
-        _memory_group.manage(&_tmp_b);
-
+        if(_reshape_b_only_on_first_run)
+        {
+            _memory_group.manage(&_tmp_b);
+        }
         // _tmp_a and _tmp_b will be auto configured in _interleave_kernel and in _transpose_kernel
 
         // Configure interleave kernel
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 3cba98c..bc339f1 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -38,8 +38,8 @@
 using namespace arm_compute;
 using namespace arm_compute::misc::shape_calculator;
 
-CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _weights_reshape_kernel(), _weights_transposed_kernel(), _weights_reshaped()
+CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
+    : _weights_reshape_kernel()
 {
 }
 
@@ -86,16 +86,12 @@
 
 void CLConvolutionLayerReshapeWeights::run()
 {
-    _memory_group.acquire();
-
     CLScheduler::get().enqueue(_weights_reshape_kernel);
-
-    _memory_group.release();
 }
 
 CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
     : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _col2im_kernel(), _im2col_output(),
-      _interleave_output(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _tmp_output(), _is_quantized(false), _is_first_run(true)
+      _weights_reshaped(), _gemm_output(), _tmp_output(), _is_quantized(false), _is_first_run(true)
 {
 }
 
diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
index 0a49158..c16ce9b 100644
--- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
@@ -30,6 +30,7 @@
 
 #include <cmath>
 #include <tuple>
+#include <utility>
 
 namespace arm_compute
 {
diff --git a/src/runtime/SubTensor.cpp b/src/runtime/SubTensor.cpp
index c5b8f33..b010a32 100644
--- a/src/runtime/SubTensor.cpp
+++ b/src/runtime/SubTensor.cpp
@@ -27,6 +27,11 @@
 
 using namespace arm_compute;
 
+SubTensor::SubTensor()
+    : _parent(nullptr), _info()
+{
+}
+
 SubTensor::SubTensor(ITensor *parent, const TensorShape &tensor_shape, const Coordinates &coords, bool extend_parent)
     : _parent(nullptr), _info()
 {
diff --git a/tests/SConscript b/tests/SConscript
index f6fe49a..efe6437 100644
--- a/tests/SConscript
+++ b/tests/SConscript
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 ARM Limited.
+# Copyright (c) 2017, 2018 ARM Limited.
 #
 # SPDX-License-Identifier: MIT
 #
@@ -58,13 +58,15 @@
 test_env.Append(LIBS = arm_compute_test_framework)
 
 if env['os'] in ['android', 'bare_metal'] or env['standalone']:
+    Import("arm_compute_graph2_a")
     Import("arm_compute_a")
     Import("arm_compute_core_a")
-    test_env.Append(LIBS = [arm_compute_a, arm_compute_core_a])
+    test_env.Append(LIBS = [arm_compute_graph2_a, arm_compute_a, arm_compute_core_a])
     arm_compute_lib = arm_compute_a
 else:
+    Import("arm_compute_graph2_so")
     Import("arm_compute_so")
-    test_env.Append(LIBS = ["arm_compute", "arm_compute_core"])
+    test_env.Append(LIBS = ["arm_compute_graph2", "arm_compute", "arm_compute_core"])
     arm_compute_lib = arm_compute_so
 
 #FIXME Delete before release
@@ -231,10 +233,12 @@
         if test_env['opencl'] and test_env['neon']:
             if env['os'] == 'android':
                 Import('arm_compute_graph_a')
-                graph_dependency = arm_compute_graph_a
+                Import("arm_compute_graph2_a")
+                graph_dependency = [arm_compute_graph_a, arm_compute_graph2_a]
             else:
                 Import('arm_compute_graph_so')
-                graph_dependency = arm_compute_graph_so
+                Import('arm_compute_graph2_so')
+                graph_dependency = [arm_compute_graph_so, arm_compute_graph2_so]
 
             graph_utils = test_env.Object(source="../utils/GraphUtils.cpp", target="GraphUtils")
             for file in Glob("../examples/graph_*.cpp"):
@@ -245,7 +249,7 @@
                     arm_compute_benchmark_examples += [ prog ]
                 else:
                     #-Wl,--allow-shlib-undefined: Ignore dependencies of dependencies
-                    prog = test_env.Program(example, [ test_env.Object(source=file, target=example), graph_utils]+ files_benchmark_examples, LIBS = test_env["LIBS"] + ["arm_compute_graph"], LINKFLAGS=test_env["LINKFLAGS"]+['-Wl,--allow-shlib-undefined'] )
+                    prog = test_env.Program(example, [ test_env.Object(source=file, target=example), graph_utils]+ files_benchmark_examples, LIBS = test_env["LIBS"] + ["arm_compute_graph", "arm_compute_graph2"], LINKFLAGS=test_env["LINKFLAGS"]+['-Wl,--allow-shlib-undefined'] )
                     Depends(prog, graph_dependency)
                     arm_compute_benchmark_examples += [ prog ]
     Depends(arm_compute_benchmark_examples, arm_compute_test_framework)
diff --git a/tests/framework/instruments/InstrumentsStats.cpp b/tests/framework/instruments/InstrumentsStats.cpp
index 6fad8f3..8f7d8a1 100644
--- a/tests/framework/instruments/InstrumentsStats.cpp
+++ b/tests/framework/instruments/InstrumentsStats.cpp
@@ -22,7 +22,7 @@
  * SOFTWARE.
  */
 #include "InstrumentsStats.h"
-#include "arm_compute/core/utils/misc/utility.h"
+#include "arm_compute/core/utils/misc/Utility.h"
 
 namespace arm_compute
 {
diff --git a/tests/validation/reference/Scale.cpp b/tests/validation/reference/Scale.cpp
index 0cc96ab..5c9e956 100644
--- a/tests/validation/reference/Scale.cpp
+++ b/tests/validation/reference/Scale.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,7 @@
 
 #include "Scale.h"
 #include "Utils.h"
-#include "arm_compute/core/utils/misc/utility.h"
+#include "arm_compute/core/utils/misc/Utility.h"
 #include "support/ToolchainSupport.h"
 
 namespace arm_compute
diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h
index cc6f404..b4c3ad8 100644
--- a/utils/GraphUtils.h
+++ b/utils/GraphUtils.h
@@ -29,6 +29,10 @@
 #include "arm_compute/graph/ITensorAccessor.h"
 #include "arm_compute/graph/Types.h"
 
+#include "arm_compute/core/CL/OpenCL.h"
+
+#include "arm_compute/graph2/Types.h"
+
 #include <array>
 #include <random>
 #include <string>
@@ -308,6 +312,26 @@
         return arm_compute::support::cpp14::make_unique<TopNPredictionsAccessor>(labels_path, top_n, output_stream);
     }
 }
+
+/** Utility function to return the TargetHint
+ *
+ * @param[in] target Integer value which expresses the selected target. Must be 0 (NEON), 1 (OpenCL) or 2 (OpenCL with Tuner)
+ *
+ * @return the TargetHint
+ */
+inline graph2::Target set_target_hint2(int target)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(target > 2, "Invalid target. Target must be 0 (NEON), 1 (OpenCL) or 2 (OpenCL with Tuner)");
+    if((target == 1 || target == 2) && arm_compute::opencl_is_available())
+    {
+        // OpenCL target requested (with or without tuner) and the OpenCL runtime is available
+        return graph2::Target::CL;
+    }
+    else
+    {
+        return graph2::Target::NEON;
+    }
+}
 } // namespace graph_utils
 } // namespace arm_compute