COMPMID-793 : Add graph intermediate representation

Change-Id: Ic1685de4e19e0ac79669ef2da64e1dc96c7ea0bf
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/115248
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/src/graph2/backends/BackendRegistry.cpp b/src/graph2/backends/BackendRegistry.cpp
new file mode 100644
index 0000000..5f1218f
--- /dev/null
+++ b/src/graph2/backends/BackendRegistry.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/BackendRegistry.h"
+
+using namespace arm_compute::graph2::backends;
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+BackendRegistry::BackendRegistry()
+    : _registered_backends()
+{
+}
+
+BackendRegistry &BackendRegistry::get()
+{
+    static BackendRegistry instance;
+    return instance;
+}
+
+IDeviceBackend *BackendRegistry::find_backend(Target target)
+{
+    ARM_COMPUTE_ERROR_ON(!contains(target));
+    return _registered_backends[target].get();
+}
+
+bool BackendRegistry::contains(Target target) const
+{
+    auto it = _registered_backends.find(target);
+    return (it != _registered_backends.end());
+}
+
+const std::map<Target, std::unique_ptr<IDeviceBackend>> &BackendRegistry::backends() const
+{
+    return _registered_backends;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
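
Usage note: BackendRegistry is a Meyers singleton keyed by Target, and backends self-register at static-initialization time through detail::BackendRegistrar (see the CL and NEON backends below). A minimal C++ sketch of driving it, assuming only the interfaces added in this patch:

    #include "arm_compute/graph2/backends/BackendRegistry.h"

    using namespace arm_compute::graph2;
    using namespace arm_compute::graph2::backends;

    void initialize_registered_backends()
    {
        // Walk every backend that self-registered before main()
        for(const auto &target_backend : BackendRegistry::get().backends())
        {
            target_backend.second->initialize_backend();
        }

        // Guard lookups with contains(): find_backend() asserts on a miss
        if(BackendRegistry::get().contains(Target::CL))
        {
            IDeviceBackend *cl_backend = BackendRegistry::get().find_backend(Target::CL);
            (void)cl_backend; // e.g. hand off to the graph manager
        }
    }
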
diff --git a/src/graph2/backends/CL/CLDeviceBackend.cpp b/src/graph2/backends/CL/CLDeviceBackend.cpp
new file mode 100644
index 0000000..e060331
--- /dev/null
+++ b/src/graph2/backends/CL/CLDeviceBackend.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/CL/CLDeviceBackend.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/graph2/INode.h"
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/Tensor.h"
+#include "arm_compute/graph2/backends/BackendRegistrar.h"
+#include "arm_compute/graph2/backends/CL/CLFunctionFactory.h"
+#include "arm_compute/graph2/backends/CL/CLSubTensorHandle.h"
+#include "arm_compute/graph2/backends/CL/CLTensorHandle.h"
+
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/runtime/BlobLifetimeManager.h"
+#include "arm_compute/runtime/CL/CLBufferAllocator.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+#include "arm_compute/runtime/PoolManager.h"
+
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+namespace
+{
+bool file_exists(const std::string &filename)
+{
+    std::ifstream file(filename);
+    return file.good();
+}
+} // namespace
+
+/** Register CL backend */
+static detail::BackendRegistrar<CLDeviceBackend> CLDeviceBackend_registrar(Target::CL);
+
+/** Tuner export file */
+static const std::string tuner_data_filename = "acl_tuner.csv";
+
+CLDeviceBackend::CLDeviceBackend()
+    : _tuner(), _allocator(cl::Context::getDefault())
+{
+}
+
+CLDeviceBackend::~CLDeviceBackend()
+{
+    // TODO (geopin01) : Shouldn't call non-exception-safe code here
+    if(_tuner.tune_new_kernels() && !_tuner.lws_table().empty())
+    {
+        _tuner.save_to_file(tuner_data_filename);
+    }
+}
+
+void CLDeviceBackend::set_kernel_tuning(bool enable_tuning)
+{
+    _tuner.set_tune_new_kernels(enable_tuning);
+}
+
+void CLDeviceBackend::initialize_backend()
+{
+    // Load tuner data if available
+    if(_tuner.lws_table().empty() && file_exists(tuner_data_filename))
+    {
+        _tuner.load_from_file(tuner_data_filename);
+    }
+
+    // Setup Scheduler
+    CLScheduler::get().default_init(&_tuner);
+
+    // Create allocator with new context
+    _allocator = CLBufferAllocator();
+}
+
+void CLDeviceBackend::setup_backend_context(GraphContext &ctx)
+{
+    // Setup tuner
+    set_kernel_tuning(ctx.is_tuning_enabled());
+
+    // Setup a management backend
+    if(ctx.memory_management_ctx(Target::CL) == nullptr)
+    {
+        MemoryManagerContext mm_ctx;
+        mm_ctx.target = Target::CL;
+        mm_ctx.mm     = create_memory_manager(MemoryManagerAffinity::Buffer);
+
+        ctx.insert_memory_management_ctx(std::move(mm_ctx));
+    }
+}
+
+std::unique_ptr<ITensorHandle> CLDeviceBackend::create_tensor(const Tensor &tensor)
+{
+    // Get tensor descriptor
+    const TensorDescriptor &tensor_desc = tensor.desc();
+    ARM_COMPUTE_ERROR_ON(tensor_desc.target != Target::CL);
+
+    // Create backend tensor handle
+    TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type);
+    auto       backend_tensor_handle = support::cpp14::make_unique<CLTensorHandle>(info);
+
+    return std::move(backend_tensor_handle);
+}
+
+std::unique_ptr<ITensorHandle> CLDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords)
+{
+    if(parent == nullptr)
+    {
+        return nullptr;
+    }
+
+    return support::cpp14::make_unique<CLSubTensorHandle>(parent, shape, coords);
+}
+
+std::unique_ptr<arm_compute::IFunction> CLDeviceBackend::configure_node(INode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Configuring CL node with ID : " << node.id() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.assigned_target() != Target::CL);
+
+    // Configure node
+    return CLFunctionFactory::create(&node, ctx);
+}
+
+arm_compute::Status CLDeviceBackend::validate_node(const INode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating CL node with ID : " << node.id() << std::endl);
+
+    ARM_COMPUTE_UNUSED(node);
+
+    return Status{};
+}
+
+std::shared_ptr<arm_compute::IMemoryManager> CLDeviceBackend::create_memory_manager(MemoryManagerAffinity affinity)
+{
+    if(affinity == MemoryManagerAffinity::Offset)
+    {
+        ARM_COMPUTE_LOG_GRAPH_WARNING("CL Backend does not support offset affinity memory management!");
+        return nullptr;
+    }
+
+    auto lifetime_mgr = std::make_shared<BlobLifetimeManager>();
+    auto pool_mgr     = std::make_shared<PoolManager>();
+    auto mm           = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);
+
+    mm->set_allocator(&_allocator);
+
+    return mm;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
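
A note on usage: the backend is normally reached through BackendRegistry rather than constructed directly, but driving it by hand shows the setup sequence. A minimal sketch, assuming the headers added in this patch and a working OpenCL runtime:

    #include "arm_compute/graph2/GraphContext.h"
    #include "arm_compute/graph2/backends/CL/CLDeviceBackend.h"

    using namespace arm_compute::graph2;
    using namespace arm_compute::graph2::backends;

    void prepare_cl(GraphContext &ctx)
    {
        CLDeviceBackend backend;

        // Loads acl_tuner.csv if present and default-initialises the CL scheduler
        backend.initialize_backend();

        // Propagates ctx.is_tuning_enabled() to the tuner and installs a
        // Target::CL MemoryManagerContext into ctx if none exists yet
        backend.setup_backend_context(ctx);

        // Offset affinity is rejected by this backend (returns nullptr);
        // Buffer affinity yields a blob-based MemoryManagerOnDemand
        auto mm = backend.create_memory_manager(MemoryManagerAffinity::Buffer);
        (void)mm;
    }
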
diff --git a/src/graph2/backends/CL/CLFunctionsFactory.cpp b/src/graph2/backends/CL/CLFunctionsFactory.cpp
new file mode 100644
index 0000000..bba0cce
--- /dev/null
+++ b/src/graph2/backends/CL/CLFunctionsFactory.cpp
@@ -0,0 +1,587 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/CL/CLFunctionFactory.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/TypePrinter.h"
+#include "arm_compute/graph2/Types.h"
+#include "arm_compute/graph2/backends/Utils.h"
+#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/runtime/CL/CLFunctions.h"
+
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute::utils::cast;
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+namespace
+{
+/** Returns backing tensor of a given tensor
+ *
+ * @param[in] tensor Tensor to extract the backing tensor from
+ *
+ * @return Backing tensor if present else nullptr
+ */
+arm_compute::ICLTensor *get_backing_tensor(arm_compute::graph2::Tensor *tensor)
+{
+    arm_compute::ICLTensor *backing_tensor = nullptr;
+    if(tensor != nullptr)
+    {
+        ARM_COMPUTE_ERROR_ON(tensor->desc().target != arm_compute::graph2::Target::CL);
+        // Get backing tensor handle
+        ITensorHandle *tensor_handle = tensor->handle();
+        // Get backing tensor
+        backing_tensor = (tensor_handle != nullptr) ? polymorphic_cast<ICLTensor *>(&tensor_handle->tensor()) : nullptr;
+    }
+
+    return backing_tensor;
+}
+
+/** Create a backend activation layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend activation layer function
+ */
+std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL ActivationLayerNode node with ID : " << node.id() << " and Name: " << node.name()
+        << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor                *input    = get_backing_tensor(node.input(0));
+    ICLTensor                *output   = get_backing_tensor(node.output(0));
+    const ActivationLayerInfo act_info = node.activation_info();
+
+    // Create function
+    auto func = support::cpp14::make_unique<CLActivationLayer>();
+    func->configure(input, output, act_info);
+
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLActivationLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Shape: " << input->info()->tensor_shape()
+                               << " Activation function: " << act_info.activation()
+                               << " a: " << act_info.a()
+                               << " b: " << act_info.b()
+                               << " InPlace : " << is_in_place_operation(input, output)
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend batch normalization layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend batch normalization layer function
+ */
+std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating CL BatchNormalization node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+
+    // TODO (geopin01) : Var and mean are compulsory, switch function to accept nullptr as beta and/or gamma
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 5);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor                *input     = get_backing_tensor(node.input(0));
+    ICLTensor                *mean      = get_backing_tensor(node.input(1));
+    ICLTensor                *var       = get_backing_tensor(node.input(2));
+    ICLTensor                *beta      = get_backing_tensor(node.input(3));
+    ICLTensor                *gamma     = get_backing_tensor(node.input(4));
+    ICLTensor                *output    = get_backing_tensor(node.output(0));
+    const float               epsilon   = node.epsilon();
+    const ActivationLayerInfo fused_act = node.fused_activation();
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<CLBatchNormalizationLayer>();
+    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLBatchNormalizationLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Shape: " << input->info()->tensor_shape()
+                               << " Epsilon: " << epsilon << " "
+                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
+                               << " InPlace : " << is_in_place_operation(input, output)
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend convolution layer function
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context
+ *
+ * @return Backend convolution layer function
+ */
+std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating CL ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor              *input          = get_backing_tensor(node.input(0));
+    ICLTensor              *weights        = get_backing_tensor(node.input(1));
+    ICLTensor              *biases         = get_backing_tensor(node.input(2));
+    ICLTensor              *output         = get_backing_tensor(node.output(0));
+    const PadStrideInfo     conv_info      = node.convolution_info();
+    const ConvolutionMethod conv_algorithm = node.convolution_method();
+
+    // Create and configure function (we assume that functions have been validated before creation)
+    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, Target::CL);
+    std::unique_ptr<IFunction>      func;
+    std::string                     func_name;
+    if(conv_algorithm == ConvolutionMethod::DIRECT)
+    {
+        std::tie(func, func_name) = create_named_function<CLDirectConvolutionLayer>(
+                                        std::string("CLDirectConvolutionLayer"), input, weights, biases, output, conv_info);
+    }
+    else if(conv_algorithm == ConvolutionMethod::GEMM)
+    {
+        std::tie(func, func_name) = create_named_memory_managed_function<CLGEMMConvolutionLayer>(std::string("CLGEMMConvolutionLayer"), mm,
+                                                                                                 input, weights, biases, output, conv_info);
+    }
+    else
+    {
+        std::tie(func, func_name) = create_named_memory_managed_function<CLConvolutionLayer>(std::string("CLConvolutionLayer"), mm,
+                                                                                             input, weights, biases, output, conv_info);
+    }
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+    return func;
+}
+
+/** Create a backend depth concatenate layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend depth concatenate layer function
+ */
+std::unique_ptr<arm_compute::IFunction> create_depth_concatenate_layer(DepthConcatenateLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating CL DepthConcatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Return nullptr if depth concatenate is switched off
+    if(!node.is_enabled())
+    {
+        return nullptr;
+    }
+
+    // Extract IO and info
+    std::vector<arm_compute::ICLTensor *> inputs;
+    for(unsigned int i = 0; i < node.num_inputs(); ++i)
+    {
+        inputs.push_back(get_backing_tensor(node.input(i)));
+    }
+    ICLTensor *output = get_backing_tensor(node.output(0));
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<CLDepthConcatenateLayer>();
+    func->configure(inputs, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLDepthConcatenateLayer"
+                               << " Data Type: " << output->info()->data_type()
+                               << " Shape: " << output->info()->tensor_shape()
+                               << " Num Inputs: " << inputs.size()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend depth-wise convolution layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend depth-wise convolution layer function
+ */
+std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name()
+        << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor                       *input         = get_backing_tensor(node.input(0));
+    ICLTensor                       *weights       = get_backing_tensor(node.input(1));
+    ICLTensor                       *biases        = get_backing_tensor(node.input(2));
+    ICLTensor                       *output        = get_backing_tensor(node.output(0));
+    const PadStrideInfo              conv_info     = node.convolution_info();
+    const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
+
+    // Create and configure function (we assume that functions have been validated before creation)
+    std::unique_ptr<IFunction> func;
+    std::string                func_name;
+    if(dwc_algorithm == DepthwiseConvolutionMethod::OPTIMIZED_3x3)
+    {
+        std::tie(func, func_name) = create_named_function<CLDepthwiseConvolutionLayer3x3>(
+                                        std::string("CLDepthwiseConvolutionLayer3x3"), input, weights, biases, output, conv_info);
+    }
+    else
+    {
+        std::tie(func, func_name) = create_named_function<CLDepthwiseConvolutionLayer>(
+                                        std::string("CLDepthwiseConvolutionLayer"), input, weights, biases, output, conv_info);
+    }
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+    return func;
+}
+
+/** Create a backend element-wise operation layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend element-wise operation layer function
+ */
+std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 2);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor             *input1     = get_backing_tensor(node.input(0));
+    ICLTensor             *input2     = get_backing_tensor(node.input(1));
+    ICLTensor             *output     = get_backing_tensor(node.output(0));
+    const EltwiseOperation eltwise_op = node.eltwise_operation();
+    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
+    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    std::unique_ptr<IFunction> func = nullptr;
+    std::string                func_name;
+    if(eltwise_op == EltwiseOperation::ADD)
+    {
+        std::tie(func, func_name) = create_named_function<CLArithmeticAddition>(std::string("CLArithmeticAddition"),
+                                                                                input1, input2, output,
+                                                                                ConvertPolicy::SATURATE);
+    }
+    else if(eltwise_op == EltwiseOperation::SUB)
+    {
+        std::tie(func, func_name) = create_named_function<CLArithmeticSubtraction>(
+                                        std::string("CLArithmeticSubtraction"), input1, input2, output, ConvertPolicy::SATURATE);
+    }
+    else if(eltwise_op == EltwiseOperation::MUL)
+    {
+        std::tie(func, func_name) = create_named_function<CLPixelWiseMultiplication>(
+                                        std::string("CLPixelWiseMultiplication"), input1, input2, output, 1.f, ConvertPolicy::SATURATE,
+                                        RoundingPolicy::TO_NEAREST_EVEN);
+    }
+    else
+    {
+        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
+    }
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+                               << " Data Type: " << input1->info()->data_type()
+                               << " Shape : " << input1->info()->tensor_shape()
+                               << std::endl);
+
+    return func;
+}
+
+/** Create a backend flatten layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend flatten layer function
+ */
+std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL FlattenLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor *input  = get_backing_tensor(node.input(0));
+    ICLTensor *output = get_backing_tensor(node.output(0));
+
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+    // Create and configure function
+    auto func = support::cpp14::make_unique<CLFlattenLayer>();
+    func->configure(input, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLFlattenLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend fully connected layer function
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context
+ *
+ * @return Backend fully connected layer function
+ */
+std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL FullyConnectedLayer node with ID : " << node.id() << " and Name: " << node.name()
+        << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor *input   = get_backing_tensor(node.input(0));
+    ICLTensor *weights = get_backing_tensor(node.input(1));
+    ICLTensor *biases  = get_backing_tensor(node.input(2));
+    ICLTensor *output  = get_backing_tensor(node.output(0));
+
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(weights == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+    // Create and configure function
+    auto func = support::cpp14::make_unique<CLFullyConnectedLayer>(get_memory_manager(ctx, Target::CL));
+    func->configure(input, weights, biases, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLFullyConnectedLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Biases Shape: " << biases->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend normalization layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend normalization layer function
+ */
+std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL NormalizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor                   *input     = get_backing_tensor(node.input(0));
+    ICLTensor                   *output    = get_backing_tensor(node.output(0));
+    const NormalizationLayerInfo norm_info = node.normalization_info();
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<CLNormalizationLayer>();
+    func->configure(input, output, norm_info);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLNormalizationLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << " Normalization info: " << norm_info.type()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend pooling layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend pooling layer function
+ */
+std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL PoolingLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor             *input     = get_backing_tensor(node.input(0));
+    ICLTensor             *output    = get_backing_tensor(node.output(0));
+    const PoolingLayerInfo pool_info = node.pooling_info();
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<CLPoolingLayer>();
+    func->configure(input, output, pool_info);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLPoolingLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << " Pooling info: " << pool_info.pool_type()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend reshape layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend reshape layer function
+ */
+std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor *input  = get_backing_tensor(node.input(0));
+    ICLTensor *output = get_backing_tensor(node.output(0));
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<CLReshapeLayer>();
+    func->configure(input, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLReshapeLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend softmax layer function
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context
+ *
+ * @return Backend softmax layer function
+ */
+std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE(
+        "Creating CL SoftmaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ICLTensor  *input  = get_backing_tensor(node.input(0));
+    ICLTensor  *output = get_backing_tensor(node.output(0));
+    const float beta   = node.beta();
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<CLSoftmaxLayer>(get_memory_manager(ctx, Target::CL));
+    func->configure(input, output, beta);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated CLSoftmaxLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
+} // namespace
+
+std::unique_ptr<IFunction> CLFunctionFactory::create(INode *node, GraphContext &ctx)
+{
+    if(node == nullptr)
+    {
+        return nullptr;
+    }
+
+    NodeType type = node->type();
+    switch(type)
+    {
+        case NodeType::ActivationLayer:
+            return create_activation_layer(*polymorphic_downcast<ActivationLayerNode *>(node));
+        case NodeType::BatchNormalizationLayer:
+            return create_batch_normalization_layer(*polymorphic_downcast<BatchNormalizationLayerNode *>(node));
+        case NodeType::ConvolutionLayer:
+            return create_convolution_layer(*polymorphic_downcast<ConvolutionLayerNode *>(node), ctx);
+        case NodeType::DepthConcatenateLayer:
+            return create_depth_concatenate_layer(*polymorphic_downcast<DepthConcatenateLayerNode *>(node));
+        case NodeType::DepthwiseConvolutionLayer:
+            return create_depthwise_convolution_layer(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+        case NodeType::EltwiseLayer:
+            return create_eltwise_layer(*polymorphic_downcast<EltwiseLayerNode *>(node));
+        case NodeType::FlattenLayer:
+            return create_flatten_layer(*polymorphic_downcast<FlattenLayerNode *>(node));
+        case NodeType::FullyConnectedLayer:
+            return create_fully_connected_layer(*polymorphic_downcast<FullyConnectedLayerNode *>(node), ctx);
+        case NodeType::NormalizationLayer:
+            return create_normalization_layer(*polymorphic_downcast<NormalizationLayerNode *>(node));
+        case NodeType::PoolingLayer:
+            return create_pooling_layer(*polymorphic_downcast<PoolingLayerNode *>(node));
+        case NodeType::ReshapeLayer:
+            return create_reshape_layer(*polymorphic_downcast<ReshapeLayerNode *>(node));
+        case NodeType::SoftmaxLayer:
+            return create_softmax_layer(*polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);
+        default:
+            return nullptr;
+    }
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
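
Implementation note: every creator above funnels through create_named_function or create_named_memory_managed_function from graph2/backends/Utils.h, which pair the configured function with its name so the factory can log what it instantiated (the memory-managed variant presumably forwards the memory manager to the function's constructor as well). That helper is not part of this hunk; the following is only a sketch of its likely shape, inferred from the call sites above, and the real signature may differ:

    #include "arm_compute/runtime/IFunction.h"
    #include "support/ToolchainSupport.h"

    #include <memory>
    #include <string>
    #include <utility>

    // Sketch only: construct a function, configure it, and return it tagged
    // with a printable name (std::tie at the call site unpacks the pair)
    template <typename FunctionType, typename... ParameterType>
    std::pair<std::unique_ptr<arm_compute::IFunction>, std::string>
    create_named_function_sketch(std::string name, ParameterType &&... args)
    {
        auto func = arm_compute::support::cpp14::make_unique<FunctionType>();
        func->configure(std::forward<ParameterType>(args)...);
        return std::make_pair(std::move(func), std::move(name));
    }
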
diff --git a/src/graph2/backends/CL/CLSubTensorHandle.cpp b/src/graph2/backends/CL/CLSubTensorHandle.cpp
new file mode 100644
index 0000000..2954652
--- /dev/null
+++ b/src/graph2/backends/CL/CLSubTensorHandle.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/CL/CLSubTensorHandle.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+CLSubTensorHandle::CLSubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords)
+    : _sub_tensor()
+{
+    ARM_COMPUTE_ERROR_ON(!parent_handle);
+    auto parent_tensor = arm_compute::utils::cast::polymorphic_downcast<ICLTensor *>(&parent_handle->tensor());
+    _sub_tensor        = arm_compute::CLSubTensor(parent_tensor, shape, coords);
+}
+
+void CLSubTensorHandle::allocate()
+{
+    // noop
+}
+
+const arm_compute::ITensor &CLSubTensorHandle::tensor() const
+{
+    return _sub_tensor;
+}
+
+arm_compute::ITensor &CLSubTensorHandle::tensor()
+{
+    return _sub_tensor;
+}
+
+void CLSubTensorHandle::map(bool blocking)
+{
+    _sub_tensor.map(blocking);
+}
+
+void CLSubTensorHandle::unmap()
+{
+    _sub_tensor.unmap();
+}
+
+bool CLSubTensorHandle::is_subtensor() const
+{
+    return true;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
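
Usage note: a sub-tensor handle aliases its parent's CL buffer, which is why allocate() above is a no-op. A minimal sketch of carving a view out of an existing handle, mirroring CLDeviceBackend::create_subtensor:

    #include "arm_compute/graph2/backends/CL/CLSubTensorHandle.h"
    #include "support/ToolchainSupport.h"

    using namespace arm_compute;
    using namespace arm_compute::graph2::backends;

    std::unique_ptr<graph2::ITensorHandle> make_view(graph2::ITensorHandle *parent,
                                                     const TensorShape &shape,
                                                     const Coordinates &start)
    {
        // A null parent yields no view, as in the device backend
        if(parent == nullptr)
        {
            return nullptr;
        }
        return support::cpp14::make_unique<CLSubTensorHandle>(parent, shape, start);
    }
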
diff --git a/src/graph2/backends/CL/CLTensorHandle.cpp b/src/graph2/backends/CL/CLTensorHandle.cpp
new file mode 100644
index 0000000..f515e0b
--- /dev/null
+++ b/src/graph2/backends/CL/CLTensorHandle.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/CL/CLTensorHandle.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+CLTensorHandle::CLTensorHandle(const ITensorInfo &info)
+    : _tensor()
+{
+    _tensor.allocator()->init(info);
+}
+
+void CLTensorHandle::allocate()
+{
+    _tensor.allocator()->allocate();
+}
+
+const arm_compute::ITensor &CLTensorHandle::tensor() const
+{
+    return _tensor;
+}
+
+arm_compute::ITensor &CLTensorHandle::tensor()
+{
+    return _tensor;
+}
+
+void CLTensorHandle::map(bool blocking)
+{
+    _tensor.map(blocking);
+}
+
+void CLTensorHandle::unmap()
+{
+    _tensor.unmap();
+}
+
+bool CLTensorHandle::is_subtensor() const
+{
+    return false;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
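
Usage note: unlike the sub-tensor case, this handle owns a CLTensor, so allocate() commits device memory and map()/unmap() bracket host access. A minimal sketch, assuming the CL runtime has already been initialised (e.g. via CLScheduler::get().default_init()):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/graph2/backends/CL/CLTensorHandle.h"

    #include <algorithm>

    using namespace arm_compute;
    using namespace arm_compute::graph2::backends;

    void zero_fill_example()
    {
        // 16-element FP32 tensor, built the same way the backend builds TensorInfo
        CLTensorHandle handle(TensorInfo(TensorShape(16U), 1, DataType::F32));
        handle.allocate();

        handle.map(true); // blocking map: the host pointer is valid on return
        std::fill_n(reinterpret_cast<float *>(handle.tensor().buffer()), 16, 0.f);
        handle.unmap();
    }
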
diff --git a/src/graph2/backends/NEON/NEDeviceBackend.cpp b/src/graph2/backends/NEON/NEDeviceBackend.cpp
new file mode 100644
index 0000000..9f24498
--- /dev/null
+++ b/src/graph2/backends/NEON/NEDeviceBackend.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/NEON/NEDeviceBackend.h"
+
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/graph2/INode.h"
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/Tensor.h"
+#include "arm_compute/graph2/backends/BackendRegistrar.h"
+#include "arm_compute/graph2/backends/NEON/NEFunctionFactory.h"
+#include "arm_compute/graph2/backends/NEON/NESubTensorHandle.h"
+#include "arm_compute/graph2/backends/NEON/NETensorHandle.h"
+
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/runtime/Allocator.h"
+#include "arm_compute/runtime/BlobLifetimeManager.h"
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+#include "arm_compute/runtime/OffsetLifetimeManager.h"
+#include "arm_compute/runtime/PoolManager.h"
+
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+/** Register NEON backend */
+static detail::BackendRegistrar<NEDeviceBackend> NEDeviceBackend_registrar(Target::NEON);
+
+NEDeviceBackend::NEDeviceBackend()
+    : _allocator()
+{
+}
+
+void NEDeviceBackend::initialize_backend()
+{
+}
+
+void NEDeviceBackend::setup_backend_context(GraphContext &ctx)
+{
+    if(ctx.memory_management_ctx(Target::NEON) == nullptr)
+    {
+        MemoryManagerContext mm_ctx;
+        mm_ctx.target = Target::NEON;
+        mm_ctx.mm     = create_memory_manager(MemoryManagerAffinity::Buffer);
+
+        ctx.insert_memory_management_ctx(std::move(mm_ctx));
+    }
+}
+
+std::unique_ptr<ITensorHandle> NEDeviceBackend::create_tensor(const Tensor &tensor)
+{
+    // Get tensor descriptor
+    const TensorDescriptor &tensor_desc = tensor.desc();
+    ARM_COMPUTE_ERROR_ON(tensor_desc.target != Target::NEON);
+
+    // Create backend tensor handle
+    TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type);
+    auto       backend_tensor_handle = support::cpp14::make_unique<NETensorHandle>(info);
+
+    return std::move(backend_tensor_handle);
+}
+
+std::unique_ptr<ITensorHandle> NEDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords)
+{
+    if(parent == nullptr)
+    {
+        return nullptr;
+    }
+
+    return support::cpp14::make_unique<NESubTensorHandle>(parent, shape, coords);
+}
+
+std::unique_ptr<arm_compute::IFunction> NEDeviceBackend::configure_node(INode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Configuring NEON node with ID : " << node.id() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.assigned_target() != Target::NEON);
+
+    // Configure node
+    return NEFunctionFactory::create(&node, ctx);
+}
+
+arm_compute::Status NEDeviceBackend::validate_node(const INode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating NEON node with ID : " << node.id() << std::endl);
+    ARM_COMPUTE_UNUSED(node);
+
+    return Status{};
+}
+
+std::shared_ptr<arm_compute::IMemoryManager> NEDeviceBackend::create_memory_manager(MemoryManagerAffinity affinity)
+{
+    std::shared_ptr<ILifetimeManager> lifetime_mgr = nullptr;
+    if(affinity == MemoryManagerAffinity::Buffer)
+    {
+        lifetime_mgr = std::make_shared<BlobLifetimeManager>();
+    }
+    else
+    {
+        lifetime_mgr = std::make_shared<OffsetLifetimeManager>();
+    }
+    auto pool_mgr = std::make_shared<PoolManager>();
+    auto mm       = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);
+
+    mm->set_allocator(&_allocator);
+
+    return mm;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
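
Usage note: the NEON backend accepts both memory-manager affinities and picks the lifetime manager accordingly, whereas the CL backend above rejects Offset affinity. A minimal sketch, assuming the header added in this patch:

    #include "arm_compute/graph2/backends/NEON/NEDeviceBackend.h"

    using namespace arm_compute::graph2;
    using namespace arm_compute::graph2::backends;

    void neon_memory_managers()
    {
        NEDeviceBackend backend;

        // Buffer affinity -> BlobLifetimeManager under the hood
        auto blob_mm = backend.create_memory_manager(MemoryManagerAffinity::Buffer);

        // Offset affinity -> OffsetLifetimeManager; supported here, unlike CL
        auto offset_mm = backend.create_memory_manager(MemoryManagerAffinity::Offset);

        (void)blob_mm;
        (void)offset_mm;
    }
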
diff --git a/src/graph2/backends/NEON/NEFunctionFactory.cpp b/src/graph2/backends/NEON/NEFunctionFactory.cpp
new file mode 100644
index 0000000..9332103
--- /dev/null
+++ b/src/graph2/backends/NEON/NEFunctionFactory.cpp
@@ -0,0 +1,564 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/NEON/NEFunctionFactory.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+#include "arm_compute/graph2/Graph.h"
+#include "arm_compute/graph2/GraphContext.h"
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/TypePrinter.h"
+#include "arm_compute/graph2/backends/Utils.h"
+#include "arm_compute/graph2/nodes/Nodes.h"
+#include "arm_compute/runtime/NEON/NEFunctions.h"
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute::utils::cast;
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+namespace
+{
+/** Returns backing tensor of a given tensor
+ *
+ * @param[in] tensor Tensor to extract the backing tensor from
+ *
+ * @return Backing tensor if present else nullptr
+ */
+arm_compute::ITensor *get_backing_tensor(arm_compute::graph2::Tensor *tensor)
+{
+    return ((tensor == nullptr) || (tensor->handle() == nullptr)) ? nullptr : &tensor->handle()->tensor();
+}
+
+/** Create a backend activation layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend activation layer function
+ */
+std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON ActivationLayerNode node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor                  *input    = get_backing_tensor(node.input(0));
+    ITensor                  *output   = get_backing_tensor(node.output(0));
+    const ActivationLayerInfo act_info = node.activation_info();
+
+    // Create function
+    auto func = support::cpp14::make_unique<NEActivationLayer>();
+    func->configure(input, output, act_info);
+
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEActivationLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Shape: " << input->info()->tensor_shape()
+                               << " Activation function: " << act_info.activation()
+                               << " a: " << act_info.a()
+                               << " b: " << act_info.b()
+                               << " InPlace : " << is_in_place_operation(input, output)
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend batch normalization layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend batch normalization layer function
+ */
+std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON BatchNormalization node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+
+    // TODO (geopin01) : Var and mean are compulsory, switch function to accept nullptr as beta and/or gamma
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 5);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor                  *input     = get_backing_tensor(node.input(0));
+    ITensor                  *mean      = get_backing_tensor(node.input(1));
+    ITensor                  *var       = get_backing_tensor(node.input(2));
+    ITensor                  *beta      = get_backing_tensor(node.input(3));
+    ITensor                  *gamma     = get_backing_tensor(node.input(4));
+    ITensor                  *output    = get_backing_tensor(node.output(0));
+    const float               epsilon   = node.epsilon();
+    const ActivationLayerInfo fused_act = node.fused_activation();
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NEBatchNormalizationLayer>();
+    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEBatchNormalizationLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Shape: " << input->info()->tensor_shape()
+                               << " Epsilon: " << epsilon << " "
+                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
+                               << " InPlace : " << is_in_place_operation(input, output)
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend convolution layer function
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context
+ *
+ * @return Backend convolution layer function
+ */
+std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor                *input          = get_backing_tensor(node.input(0));
+    ITensor                *weights        = get_backing_tensor(node.input(1));
+    ITensor                *biases         = get_backing_tensor(node.input(2));
+    ITensor                *output         = get_backing_tensor(node.output(0));
+    const PadStrideInfo     conv_info      = node.convolution_info();
+    const ConvolutionMethod conv_algorithm = node.convolution_method();
+
+    // Create and configure function (we assume that functions have been validated before creation)
+    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, Target::NEON);
+    std::unique_ptr<IFunction>      func;
+    std::string                     func_name;
+    if(conv_algorithm == ConvolutionMethod::DIRECT)
+    {
+        std::tie(func, func_name) = create_named_memory_managed_function<NEDirectConvolutionLayer>(std::string("NEDirectConvolutionLayer"), mm,
+                                                                                                   input, weights, biases, output, conv_info);
+    }
+    else if(conv_algorithm == ConvolutionMethod::GEMM)
+    {
+        std::tie(func, func_name) = create_named_memory_managed_function<NEGEMMConvolutionLayer>(std::string("NEGEMMConvolutionLayer"), mm,
+                                                                                                 input, weights, biases, output, conv_info);
+    }
+    else if(conv_algorithm == ConvolutionMethod::WINOGRAD)
+    {
+        std::tie(func, func_name) = create_named_memory_managed_function<NEWinogradLayer>(std::string("NEWinogradLayer"), mm,
+                                                                                          input, weights, biases, output, conv_info);
+    }
+    else
+    {
+        std::tie(func, func_name) = create_named_memory_managed_function<NEConvolutionLayer>(std::string("NEConvolutionLayer"), mm,
+                                                                                             input, weights, biases, output, conv_info);
+    }
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+    return func;
+}
+
+/** Create a backend depth concatenate layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend depth concatenate layer function
+ */
+std::unique_ptr<arm_compute::IFunction> create_depth_concatenate_layer(DepthConcatenateLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON DepthConcatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Return nullptr if depth concatenate is switched off
+    if(!node.is_enabled())
+    {
+        return nullptr;
+    }
+
+    // Extract IO and info
+    std::vector<arm_compute::ITensor *> inputs;
+    for(unsigned int i = 0; i < node.num_inputs(); ++i)
+    {
+        inputs.push_back(get_backing_tensor(node.input(i)));
+    }
+    ITensor *output = get_backing_tensor(node.output(0));
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NEDepthConcatenateLayer>();
+    func->configure(inputs, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEDepthConcatenateLayer"
+                               << " Data Type: " << output->info()->data_type()
+                               << " Shape: " << output->info()->tensor_shape()
+                               << " Num Inputs: " << inputs.size()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend depth-wise convolution layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend depth-wise convolution layer function
+ */
+std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor                         *input         = get_backing_tensor(node.input(0));
+    ITensor                         *weights       = get_backing_tensor(node.input(1));
+    ITensor                         *biases        = get_backing_tensor(node.input(2));
+    ITensor                         *output        = get_backing_tensor(node.output(0));
+    const PadStrideInfo              conv_info     = node.convolution_info();
+    const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
+
+    // Create and configure function (we assume that functions have been validated before creation)
+    std::unique_ptr<IFunction> func;
+    std::string                func_name;
+    if(dwc_algorithm == DepthwiseConvolutionMethod::OPTIMIZED_3x3)
+    {
+        std::tie(func, func_name) = create_named_function<NEDepthwiseConvolutionLayer3x3>(std::string("NEDepthwiseConvolutionLayer3x3"),
+                                                                                          input, weights, biases, output, conv_info);
+    }
+    else
+    {
+        std::tie(func, func_name) = create_named_function<NEDepthwiseConvolutionLayer>(std::string("NEDepthwiseConvolutionLayer"),
+                                                                                       input, weights, biases, output, conv_info);
+    }
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+    return func;
+}
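+
+// The branch above keys purely off the node's DepthwiseConvolutionMethod, so
+// forcing the optimized path is a node-level decision. A hedged sketch
+// (`dwc_node` is hypothetical; the setter name is assumed from the node API):
+//
+//     dwc_node.set_depthwise_convolution_method(DepthwiseConvolutionMethod::OPTIMIZED_3x3);
+//     auto fn = create_depthwise_convolution_layer(dwc_node); // -> NEDepthwiseConvolutionLayer3x3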
+
+/** Create a backend element-wise operation layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend element-wise operation layer function
+ */
+std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 2);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor               *input1     = get_backing_tensor(node.input(0));
+    ITensor               *input2     = get_backing_tensor(node.input(1));
+    ITensor               *output     = get_backing_tensor(node.output(0));
+    const EltwiseOperation eltwise_op = node.eltwise_operation();
+    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
+    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    std::unique_ptr<IFunction> func = nullptr;
+    std::string                func_name;
+    if(eltwise_op == EltwiseOperation::ADD)
+    {
+        std::tie(func, func_name) = create_named_function<NEArithmeticAddition>(std::string("NEArithmeticAddition"),
+                                                                                input1, input2, output, ConvertPolicy::SATURATE);
+    }
+    else if(eltwise_op == EltwiseOperation::SUB)
+    {
+        std::tie(func, func_name) = create_named_function<NEArithmeticSubtraction>(std::string("NEArithmeticSubtraction"),
+                                                                                   input1, input2, output, ConvertPolicy::SATURATE);
+    }
+    else if(eltwise_op == EltwiseOperation::MUL)
+    {
+        std::tie(func, func_name) = create_named_function<NEPixelWiseMultiplication>(std::string("NEPixelWiseMultiplication"),
+                                                                                     input1, input2, output, 1.f,
+                                                                                     ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
+    }
+    else
+    {
+        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
+    }
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+                               << " Data Type: " << input1->info()->data_type()
+                               << " Shape : " << input1->info()->tensor_shape()
+                               << std::endl);
+
+    return func;
+}
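+
+// All three arithmetic paths above use ConvertPolicy::SATURATE: results that
+// overflow the output data type clamp to its representable range rather than
+// wrapping. For example, with U8 tensors:
+//
+//     200 + 100 -> 255 under ConvertPolicy::SATURATE (clamped to UINT8_MAX)
+//     200 + 100 ->  44 under ConvertPolicy::WRAP     (modulo 256)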
+
+/** Create a backend flatten layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend flatten layer function
+ */
+std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON FlattenLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor *input  = get_backing_tensor(node.input(0));
+    ITensor *output = get_backing_tensor(node.output(0));
+
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NEFlattenLayer>();
+    func->configure(input, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEFlattenLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend fully connected layer function
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context
+ *
+ * @return Backend fully connected layer function
+ */
+std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON FullyConnectedLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor *input   = get_backing_tensor(node.input(0));
+    ITensor *weights = get_backing_tensor(node.input(1));
+    ITensor *biases  = get_backing_tensor(node.input(2));
+    ITensor *output  = get_backing_tensor(node.output(0));
+
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(weights == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NEFullyConnectedLayer>(get_memory_manager(ctx, Target::NEON));
+    func->configure(input, weights, biases, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEFullyConnectedLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
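+
+// NEFullyConnectedLayer receives the context's NEON memory manager, so the
+// large GEMM temporaries it creates internally can share memory with other
+// functions built from the same GraphContext. A hedged sketch (`fc_node` is
+// hypothetical):
+//
+//     GraphContext ctx;
+//     auto fn = create_fully_connected_layer(fc_node, ctx); // temporaries pooled via ctx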
+
+/** Create a backend normalization layer function
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context
+ *
+ * @return Backend normalization layer function
+ */
+std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON NormalizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor                     *input     = get_backing_tensor(node.input(0));
+    ITensor                     *output    = get_backing_tensor(node.output(0));
+    const NormalizationLayerInfo norm_info = node.normalization_info();
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NENormalizationLayer>(get_memory_manager(ctx, Target::NEON));
+    func->configure(input, output, norm_info);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NENormalizationLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << " Normalization info: " << norm_info.type()
+                               << std::endl);
+
+    return std::move(func);
+}
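+
+// norm_info selects the normalization variant logged above. A hedged sketch of
+// an LRN-style cross-map configuration (parameter values are illustrative):
+//
+//     NormalizationLayerInfo norm_info(NormType::CROSS_MAP, 5 /* norm size */);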
+
+/** Create a backend pooling layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend pooling layer function
+ */
+std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON PoolingLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor               *input     = get_backing_tensor(node.input(0));
+    ITensor               *output    = get_backing_tensor(node.output(0));
+    const PoolingLayerInfo pool_info = node.pooling_info();
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NEPoolingLayer>();
+    func->configure(input, output, pool_info);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEPoolingLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << " Pooling info: " << pool_info.pool_type()
+                               << std::endl);
+
+    return std::move(func);
+}
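+
+// pool_info bundles the pooling type, window size, and stride/padding into one
+// descriptor. A hedged construction sketch (values are illustrative):
+//
+//     PoolingLayerInfo pool_info(PoolingType::MAX, 2 /* pool size */, PadStrideInfo(2, 2, 0, 0));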
+
+/** Create a backend reshape layer function
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend reshape layer function
+ */
+std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor *input  = get_backing_tensor(node.input(0));
+    ITensor *output = get_backing_tensor(node.output(0));
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NEReshapeLayer>();
+    func->configure(input, output);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NEReshapeLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
+
+/** Create a backend softmax layer function
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context
+ *
+ * @return Backend softmax layer function
+ */
+std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating NEON SoftmaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    ITensor    *input  = get_backing_tensor(node.input(0));
+    ITensor    *output = get_backing_tensor(node.output(0));
+    const float beta   = node.beta();
+    ARM_COMPUTE_ERROR_ON(input == nullptr);
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    // Create and configure function
+    auto func = support::cpp14::make_unique<NESoftmaxLayer>(get_memory_manager(ctx, Target::NEON));
+    func->configure(input, output, beta);
+
+    // Log info
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated NESoftmaxLayer"
+                               << " Data Type: " << input->info()->data_type()
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(func);
+}
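+
+// The beta extracted from the node scales the logits before normalization, i.e.
+// the configured function computes
+//
+//     softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j)
+//
+// so beta == 1.f reproduces the standard softmax.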
+} // namespace
+
+std::unique_ptr<IFunction> NEFunctionFactory::create(INode *node, GraphContext &ctx)
+{
+    if(node == nullptr)
+    {
+        return nullptr;
+    }
+
+    NodeType type = node->type();
+    switch(type)
+    {
+        case NodeType::ActivationLayer:
+            return create_activation_layer(*polymorphic_downcast<ActivationLayerNode *>(node));
+        case NodeType::BatchNormalizationLayer:
+            return create_batch_normalization_layer(*polymorphic_downcast<BatchNormalizationLayerNode *>(node));
+        case NodeType::ConvolutionLayer:
+            return create_convolution_layer(*polymorphic_downcast<ConvolutionLayerNode *>(node), ctx);
+        case NodeType::DepthConcatenateLayer:
+            return create_depth_concatenate_layer(*polymorphic_downcast<DepthConcatenateLayerNode *>(node));
+        case NodeType::DepthwiseConvolutionLayer:
+            return create_depthwise_convolution_layer(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+        case NodeType::EltwiseLayer:
+            return create_eltwise_layer(*polymorphic_downcast<EltwiseLayerNode *>(node));
+        case NodeType::FlattenLayer:
+            return create_flatten_layer(*polymorphic_downcast<FlattenLayerNode *>(node));
+        case NodeType::FullyConnectedLayer:
+            return create_fully_connected_layer(*polymorphic_downcast<FullyConnectedLayerNode *>(node), ctx);
+        case NodeType::NormalizationLayer:
+            return create_normalization_layer(*polymorphic_downcast<NormalizationLayerNode *>(node), ctx);
+        case NodeType::PoolingLayer:
+            return create_pooling_layer(*polymorphic_downcast<PoolingLayerNode *>(node));
+        case NodeType::ReshapeLayer:
+            return create_reshape_layer(*polymorphic_downcast<ReshapeLayerNode *>(node));
+        case NodeType::SoftmaxLayer:
+            return create_softmax_layer(*polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);
+        default:
+            return nullptr;
+    }
+}
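+
+// A minimal sketch of how a backend pass might drive this factory (the graph
+// iteration and workload container are assumptions, not part of this patch):
+//
+//     for(auto &node : graph.nodes())
+//     {
+//         std::unique_ptr<IFunction> fn = NEFunctionFactory::create(node.get(), ctx);
+//         if(fn != nullptr)
+//         {
+//             workload.push_back(std::move(fn)); // unsupported node types fall through as nullptr
+//         }
+//     }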
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/NEON/NESubTensorHandle.cpp b/src/graph2/backends/NEON/NESubTensorHandle.cpp
new file mode 100644
index 0000000..9b3c9b1
--- /dev/null
+++ b/src/graph2/backends/NEON/NESubTensorHandle.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/NEON/NESubTensorHandle.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+NESubTensorHandle::NESubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords)
+    : _sub_tensor()
+{
+    ARM_COMPUTE_ERROR_ON(!parent_handle);
+    _sub_tensor = arm_compute::SubTensor(&parent_handle->tensor(), shape, coords);
+}
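+
+// The sub-tensor aliases the parent's buffer at the given coordinates, which is
+// why allocate()/map()/unmap() below are no-ops: the parent handle owns the
+// memory. A hedged sketch (parent construction and offsets are illustrative):
+//
+//     NETensorHandle    parent(parent_info);
+//     NESubTensorHandle slice(&parent, sub_shape, Coordinates(0, 0, depth_offset));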
+
+void NESubTensorHandle::allocate()
+{
+    // noop
+}
+
+const arm_compute::ITensor &NESubTensorHandle::tensor() const
+{
+    return _sub_tensor;
+}
+
+arm_compute::ITensor &NESubTensorHandle::tensor()
+{
+    return _sub_tensor;
+}
+
+void NESubTensorHandle::map(bool blocking)
+{
+    ARM_COMPUTE_UNUSED(blocking);
+}
+
+void NESubTensorHandle::unmap()
+{
+    // noop
+}
+
+bool NESubTensorHandle::is_subtensor() const
+{
+    return true;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/NEON/NETensorHandle.cpp b/src/graph2/backends/NEON/NETensorHandle.cpp
new file mode 100644
index 0000000..a4af8aa
--- /dev/null
+++ b/src/graph2/backends/NEON/NETensorHandle.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/NEON/NETensorHandle.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+NETensorHandle::NETensorHandle(const ITensorInfo &info)
+    : _tensor()
+{
+    _tensor.allocator()->init(info);
+}
+
+void NETensorHandle::allocate()
+{
+    _tensor.allocator()->allocate();
+}
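+
+// Tensor lifetime is two-phase: the constructor only records the ITensorInfo
+// via allocator()->init(), and backing memory is committed here in allocate().
+// A hedged usage sketch (shape and type are illustrative):
+//
+//     NETensorHandle handle(TensorInfo(TensorShape(224U, 224U, 3U), 1, DataType::F32));
+//     handle.allocate(); // memory exists only after this call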
+
+const arm_compute::ITensor &NETensorHandle::tensor() const
+{
+    return _tensor;
+}
+
+arm_compute::ITensor &NETensorHandle::tensor()
+{
+    return _tensor;
+}
+
+void NETensorHandle::map(bool blocking)
+{
+    ARM_COMPUTE_UNUSED(blocking);
+}
+
+void NETensorHandle::unmap()
+{
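+    // noop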
+}
+
+bool NETensorHandle::is_subtensor() const
+{
+    return false;
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file