COMPMID-1007: Add initial validate support to backend

Change-Id: I55eae35f35a3c7891e8d535907c861f022e43bea
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/125470
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
diff --git a/arm_compute/graph2/IDeviceBackend.h b/arm_compute/graph2/IDeviceBackend.h
index 771ff85..2e8f3cb 100644
--- a/arm_compute/graph2/IDeviceBackend.h
+++ b/arm_compute/graph2/IDeviceBackend.h
@@ -88,7 +88,7 @@
      *
      * @return An error status
      */
-    virtual Status validate_node(const INode &node) = 0;
+    virtual Status validate_node(INode &node) = 0;
     /** Create a backend memory manager given its affinity
      *
      * @param[in] affinity Memory Manager affinity
diff --git a/arm_compute/graph2/backends/CL/CLDeviceBackend.h b/arm_compute/graph2/backends/CL/CLDeviceBackend.h
index c48a85f..77a8faf 100644
--- a/arm_compute/graph2/backends/CL/CLDeviceBackend.h
+++ b/arm_compute/graph2/backends/CL/CLDeviceBackend.h
@@ -57,7 +57,7 @@
     std::unique_ptr<ITensorHandle> create_tensor(const Tensor &tensor) override;
     std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords) override;
     std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
-    Status validate_node(const INode &node) override;
+    Status validate_node(INode &node) override;
     std::shared_ptr<arm_compute::IMemoryManager> create_memory_manager(MemoryManagerAffinity affinity) override;
 
 private:
diff --git a/arm_compute/graph2/backends/CL/CLNodeValidator.h b/arm_compute/graph2/backends/CL/CLNodeValidator.h
new file mode 100644
index 0000000..251f705
--- /dev/null
+++ b/arm_compute/graph2/backends/CL/CLNodeValidator.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_CLNODEVALIDATOR_H__
+#define __ARM_COMPUTE_GRAPH2_CLNODEVALIDATOR_H__
+
+#include "arm_compute/core/Error.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+// Forward declarations
+class INode;
+
+namespace backends
+{
+class CLNodeValidator final
+{
+public:
+    /** Validate a node
+     *
+     * @param[in, out] node Node to validate (may be mutated, e.g. to switch the convolution method)
+     *
+     * @return An error status
+     */
+    static Status validate(INode *node);
+};
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+#endif //__ARM_COMPUTE_GRAPH2_CLNODEVALIDATOR_H__
diff --git a/arm_compute/graph2/backends/NEON/NEDeviceBackend.h b/arm_compute/graph2/backends/NEON/NEDeviceBackend.h
index 533a2c0..5d1394b 100644
--- a/arm_compute/graph2/backends/NEON/NEDeviceBackend.h
+++ b/arm_compute/graph2/backends/NEON/NEDeviceBackend.h
@@ -46,7 +46,7 @@
     std::unique_ptr<ITensorHandle> create_tensor(const Tensor &tensor) override;
     std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords) override;
     std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
-    Status validate_node(const INode &node) override;
+    Status validate_node(INode &node) override;
     std::shared_ptr<arm_compute::IMemoryManager> create_memory_manager(MemoryManagerAffinity affinity) override;
 
 private:
diff --git a/arm_compute/graph2/backends/NEON/NENodeValidator.h b/arm_compute/graph2/backends/NEON/NENodeValidator.h
index 8e84485..d39ab15 100644
--- a/arm_compute/graph2/backends/NEON/NENodeValidator.h
+++ b/arm_compute/graph2/backends/NEON/NENodeValidator.h
@@ -24,15 +24,28 @@
 #ifndef __ARM_COMPUTE_GRAPH2_NENODEVALIDATOR_H__
 #define __ARM_COMPUTE_GRAPH2_NENODEVALIDATOR_H__
 
-#include "arm_compute/graph2/INodeVisitor.h"
+#include "arm_compute/core/Error.h"
 
 namespace arm_compute
 {
 namespace graph2
 {
+// Forward declarations
+class INode;
+
 namespace backends
 {
-// TODO (geopin01) : Add node validator
+class NENodeValidator final
+{
+public:
+    /** Validate a node
+     *
+     * @param[in, out] node Node to validate (may be mutated, e.g. to switch the convolution method)
+     *
+     * @return An error status
+     */
+    static Status validate(INode *node);
+};
 } // namespace backends
 } // namespace graph2
 } // namespace arm_compute
diff --git a/arm_compute/graph2/backends/ValidateHelpers.h b/arm_compute/graph2/backends/ValidateHelpers.h
new file mode 100644
index 0000000..0c93f0f
--- /dev/null
+++ b/arm_compute/graph2/backends/ValidateHelpers.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH2_BACKENDS_DETAIL_VALIDATE_HELPERS_H__
+#define __ARM_COMPUTE_GRAPH2_BACKENDS_DETAIL_VALIDATE_HELPERS_H__
+
+#include "arm_compute/graph2/Logger.h"
+#include "arm_compute/graph2/Tensor.h"
+#include "arm_compute/graph2/Types.h"
+#include "arm_compute/graph2/nodes/Nodes.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/ITensorInfo.h"
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+namespace detail
+{
+/** Returns backing tensor info of a given tensor
+ *
+ * @param[in] tensor Tensor to extract the backing tensor from
+ *
+ * @return Backing tensor info if present else nullptr
+ */
+inline arm_compute::ITensorInfo *get_backing_tensor_info(arm_compute::graph2::Tensor *tensor)
+{
+    return ((tensor == nullptr) || (tensor->handle() == nullptr)) ? nullptr : tensor->handle()->tensor().info();
+}
+
+/** Validates a Convolution layer node
+ *
+ * @tparam ConvolutionLayer          Default Convolution layer function type
+ * @tparam DirectConvolutionLayer    Direct Convolution layer function type
+ * @tparam GEMMConvolutionLayer      GEMM Convolution layer function type
+ * @tparam WinogradConvolutionLayer  Winograd Convolution layer function type
+ *
+ * @param[in, out] node Node to validate (convolution method may be switched to DEFAULT)
+ *
+ * @return Status
+ */
+template <typename ConvolutionLayer, typename DirectConvolutionLayer, typename GEMMConvolutionLayer, typename WinogradConvolutionLayer>
+Status validate_convolution_layer(ConvolutionLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    arm_compute::ITensorInfo *input          = get_backing_tensor_info(node.input(0));
+    arm_compute::ITensorInfo *weights        = get_backing_tensor_info(node.input(1));
+    arm_compute::ITensorInfo *biases         = get_backing_tensor_info(node.input(2));
+    arm_compute::ITensorInfo *output         = get_backing_tensor_info(node.output(0));
+    const PadStrideInfo       conv_info      = node.convolution_info();
+    const ConvolutionMethod   conv_algorithm = node.convolution_method();
+
+    // Validate function
+    Status status{};
+    switch(conv_algorithm)
+    {
+        case ConvolutionMethod::DIRECT:
+            status = DirectConvolutionLayer::validate(input, weights, biases, output, conv_info);
+            break;
+        case ConvolutionMethod::GEMM:
+            status = GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info);
+            break;
+        case ConvolutionMethod::WINOGRAD:
+            status = WinogradConvolutionLayer::validate(input, weights, biases, output, conv_info);
+            break;
+        default:
+            break;
+    }
+
+    // If validation fails try the Default approach
+    if(!bool(status) || (conv_algorithm == ConvolutionMethod::DEFAULT))
+    {
+        status = ConvolutionLayer::validate(input, weights, biases, output, conv_info);
+        if(bool(status))
+        {
+            ARM_COMPUTE_LOG_GRAPH_INFO("Switched ConvolutionLayer method of node with ID : "
+                                       << node.id() << " and Name: " << node.name() << std::endl);
+            node.set_convolution_method(ConvolutionMethod::DEFAULT);
+        }
+    }
+
+    return status;
+}
+
+/** Validates a Depthwise Convolution layer node
+ *
+ * @tparam DepthwiseConvolutionLayer    Default Depthwise Convolution layer type
+ * @tparam DepthwiseConvolutionLayer3x3 Optimized 3x3 Depthwise Convolution layer type
+ *
+ * @param[in, out] node Node to validate (depthwise convolution method may be switched to DEFAULT)
+ *
+ * @return Status
+ */
+template <typename DepthwiseConvolutionLayer, typename DepthwiseConvolutionLayer3x3>
+Status validate_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
+{
+    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+    ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
+    ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
+
+    // Extract IO and info
+    arm_compute::ITensorInfo        *weights       = detail::get_backing_tensor_info(node.input(1));
+    const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
+    ARM_COMPUTE_ERROR_ON(weights == nullptr);
+
+    // TODO (geopin01) : Switch when validation is implemented
+    // Validate function
+    if((dwc_algorithm == DepthwiseConvolutionMethod::OPTIMIZED_3x3) && (weights->tensor_shape().x() != 3))
+    {
+        ARM_COMPUTE_LOG_GRAPH_INFO("Switched DepthwiseConvolutionLayer method of node with ID : "
+                                   << node.id() << " and Name: " << node.name() << std::endl);
+        node.set_depthwise_convolution_method(DepthwiseConvolutionMethod::DEFAULT);
+    }
+
+    return Status{};
+}
+} // namespace detail
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
+
+#endif /* __ARM_COMPUTE_GRAPH2_BACKENDS_DETAIL_VALIDATE_HELPERS_H__ */
diff --git a/arm_compute/graph2/detail/ExecutionHelpers.h b/arm_compute/graph2/detail/ExecutionHelpers.h
index acd1654..e4523ec 100644
--- a/arm_compute/graph2/detail/ExecutionHelpers.h
+++ b/arm_compute/graph2/detail/ExecutionHelpers.h
@@ -50,6 +50,11 @@
  * @param[in] g Graph to allocate the tensors
  */
 void allocate_all_tensors(Graph &g);
+/** Validates all nodes
+ *
+ * @param[in] g Graph to validate
+ */
+void validate_all_nodes(Graph &g);
 /** Configures all nodes of graph
  *
  * @param[in] g   Graph to configure the nodes
diff --git a/examples/graph_googlenet.cpp b/examples/graph_googlenet.cpp
index d64512b..e97f3ac 100644
--- a/examples/graph_googlenet.cpp
+++ b/examples/graph_googlenet.cpp
@@ -54,7 +54,7 @@
         // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
         const int         target                   = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
         Target            target_hint              = set_target_hint2(target);
-        ConvolutionMethod convolution_hint         = ConvolutionMethod::GEMM;
+        ConvolutionMethod convolution_hint         = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
         bool              enable_tuning            = (target == 2);
         bool              enable_memory_management = true;
 
diff --git a/examples/graph_inception_v3.cpp b/examples/graph_inception_v3.cpp
index 9bb51ba..73a4450 100644
--- a/examples/graph_inception_v3.cpp
+++ b/examples/graph_inception_v3.cpp
@@ -56,6 +56,8 @@
         bool      enable_tuning            = (target == 2);
         bool      enable_memory_management = true;
 
+        ConvolutionMethod convolution_hint = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
+
         // Parse arguments
         if(argc < 2)
         {
@@ -90,7 +92,6 @@
 
         graph << target_hint << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), DataType::F32),
                                            get_input_accessor(image, std::move(preprocessor), false))
-
               << ConvolutionLayer(3U, 3U, 32U,
                                   get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_weights.npy"),
                                   std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(2, 2, 0, 0))
@@ -102,7 +103,7 @@
                                                                                              "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_BatchNorm_beta.npy"),
                                          0.001f)
               << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
-
+              << convolution_hint
               << ConvolutionLayer(3U, 3U, 32U,
                                   get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_2a_3x3_weights.npy"),
                                   std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0))
diff --git a/examples/graph_inception_v4.cpp b/examples/graph_inception_v4.cpp
index 6f76b5e..88073b7 100644
--- a/examples/graph_inception_v4.cpp
+++ b/examples/graph_inception_v4.cpp
@@ -60,6 +60,8 @@
         bool      enable_tuning            = (target == 2);
         bool      enable_memory_management = true;
 
+        ConvolutionMethod convolution_hint = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
+
         // Parse arguments
         if(argc < 2)
         {
@@ -104,6 +106,7 @@
                                          get_weights_accessor(data_path, "/cnn_data/inceptionv4_model/Conv2d_1a_3x3_BatchNorm_beta.npy"),
                                          0.001f)
               << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+              << convolution_hint
               // Conv2d_2a_3x3
               << ConvolutionLayer(3U, 3U, 32U,
                                   get_weights_accessor(data_path, "/cnn_data/inceptionv4_model/Conv2d_2a_3x3_weights.npy"),
diff --git a/examples/graph_resnet50.cpp b/examples/graph_resnet50.cpp
index 90debb4..a7d7abc 100644
--- a/examples/graph_resnet50.cpp
+++ b/examples/graph_resnet50.cpp
@@ -57,6 +57,8 @@
         bool      enable_tuning            = (target == 2);
         bool      enable_memory_management = true;
 
+        ConvolutionMethod convolution_hint = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
+
         // Parse arguments
         if(argc < 2)
         {
@@ -97,6 +99,7 @@
                   get_weights_accessor(data_path, "/cnn_data/resnet50_model/conv1_weights.npy"),
                   std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                   PadStrideInfo(2, 2, 3, 3))
+              << convolution_hint
               << BatchNormalizationLayer(
                   get_weights_accessor(data_path, "/cnn_data/resnet50_model/conv1_BatchNorm_moving_mean.npy"),
                   get_weights_accessor(data_path, "/cnn_data/resnet50_model/conv1_BatchNorm_moving_variance.npy"),
diff --git a/examples/graph_squeezenet.cpp b/examples/graph_squeezenet.cpp
index b4e00a4..92e6a38 100644
--- a/examples/graph_squeezenet.cpp
+++ b/examples/graph_squeezenet.cpp
@@ -58,6 +58,8 @@
         bool      enable_tuning            = (target == 2);
         bool      enable_memory_management = true;
 
+        ConvolutionMethod convolution_hint = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
+
         // Parse arguments
         if(argc < 2)
         {
@@ -100,6 +102,7 @@
                   PadStrideInfo(2, 2, 0, 0))
               << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
               << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
+              << convolution_hint
               << ConvolutionLayer(
                   1U, 1U, 16U,
                   get_weights_accessor(data_path, "/cnn_data/squeezenet_v1.0_model/fire2_squeeze1x1_w.npy"),
diff --git a/examples/graph_squeezenet_v1_1.cpp b/examples/graph_squeezenet_v1_1.cpp
index 4ebfd3f..540784e 100644
--- a/examples/graph_squeezenet_v1_1.cpp
+++ b/examples/graph_squeezenet_v1_1.cpp
@@ -61,6 +61,8 @@
         bool      enable_tuning            = (target == 2);
         bool      enable_memory_management = true;
 
+        ConvolutionMethod convolution_hint = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
+
         // Parse arguments
         if(argc < 2)
         {
@@ -96,6 +98,7 @@
         graph << target_hint
               << InputLayer(TensorDescriptor(TensorShape(227U, 227U, 3U, 1U), DataType::F32),
                             get_input_accessor(image, std::move(preprocessor)))
+              << ConvolutionMethod::DIRECT
               << ConvolutionLayer(
                   3U, 3U, 64U,
                   get_weights_accessor(data_path, "/cnn_data/squeezenet_v1_1_model/conv1_w.npy"),
@@ -103,6 +106,7 @@
                   PadStrideInfo(2, 2, 0, 0))
               << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
               << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
+              << convolution_hint
               << ConvolutionLayer(
                   1U, 1U, 16U,
                   get_weights_accessor(data_path, "/cnn_data/squeezenet_v1_1_model/fire2_squeeze1x1_w.npy"),
diff --git a/src/graph2/GraphManager.cpp b/src/graph2/GraphManager.cpp
index edbe2cc..e708dc6 100644
--- a/src/graph2/GraphManager.cpp
+++ b/src/graph2/GraphManager.cpp
@@ -64,6 +64,9 @@
     // Perform topological sort
     // FIXME : Sort nodes and pass sorted indices in configure all nodes
 
+    // Validate all nodes
+    detail::validate_all_nodes(graph);
+
     // Configure all nodes
     auto workload = detail::configure_all_nodes(graph, ctx);
     ARM_COMPUTE_ERROR_ON_MSG(workload.tasks.empty(), "Could not configure all nodes!");
diff --git a/src/graph2/backends/CL/CLDeviceBackend.cpp b/src/graph2/backends/CL/CLDeviceBackend.cpp
index e060331..28e0534 100644
--- a/src/graph2/backends/CL/CLDeviceBackend.cpp
+++ b/src/graph2/backends/CL/CLDeviceBackend.cpp
@@ -30,6 +30,7 @@
 #include "arm_compute/graph2/Tensor.h"
 #include "arm_compute/graph2/backends/BackendRegistrar.h"
 #include "arm_compute/graph2/backends/CL/CLFunctionFactory.h"
+#include "arm_compute/graph2/backends/CL/CLNodeValidator.h"
 #include "arm_compute/graph2/backends/CL/CLSubTensorHandle.h"
 #include "arm_compute/graph2/backends/CL/CLTensorHandle.h"
 
@@ -145,13 +146,12 @@
     return CLFunctionFactory::create(&node, ctx);
 }
 
-arm_compute::Status CLDeviceBackend::validate_node(const INode &node)
+arm_compute::Status CLDeviceBackend::validate_node(INode &node)
 {
     ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating CL node with ID : " << node.id() << std::endl);
+    ARM_COMPUTE_ERROR_ON(node.assigned_target() != Target::CL);
 
-    ARM_COMPUTE_UNUSED(node);
-
-    return Status{};
+    return CLNodeValidator::validate(&node);
 }
 
 std::shared_ptr<arm_compute::IMemoryManager> CLDeviceBackend::create_memory_manager(MemoryManagerAffinity affinity)
diff --git a/src/graph2/backends/CL/CLNodeValidator.cpp b/src/graph2/backends/CL/CLNodeValidator.cpp
new file mode 100644
index 0000000..8512856
--- /dev/null
+++ b/src/graph2/backends/CL/CLNodeValidator.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/CL/CLNodeValidator.h"
+
+#include "arm_compute/graph2/backends/ValidateHelpers.h"
+#include "arm_compute/graph2/nodes/Nodes.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+#include "arm_compute/runtime/CL/CLFunctions.h"
+
+using namespace arm_compute::utils::cast;
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+Status CLNodeValidator::validate(INode *node)
+{
+    if(node == nullptr)
+    {
+        return Status{};
+    }
+
+    NodeType type = node->type();
+    switch(type)
+    {
+        case NodeType::ConvolutionLayer:
+            return detail::validate_convolution_layer<CLConvolutionLayer,
+                   CLDirectConvolutionLayer,
+                   CLGEMMConvolutionLayer,
+                   CLWinogradConvolutionLayer>(*polymorphic_downcast<ConvolutionLayerNode *>(node));
+        case NodeType::DepthwiseConvolutionLayer:
+            return detail::validate_depthwise_convolution_layer<CLDepthwiseConvolutionLayer,
+                   CLDepthwiseConvolutionLayer3x3>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+        default:
+            return Status{};
+    }
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/backends/NEON/NEDeviceBackend.cpp b/src/graph2/backends/NEON/NEDeviceBackend.cpp
index 9f24498..5569abf 100644
--- a/src/graph2/backends/NEON/NEDeviceBackend.cpp
+++ b/src/graph2/backends/NEON/NEDeviceBackend.cpp
@@ -30,6 +30,7 @@
 #include "arm_compute/graph2/Tensor.h"
 #include "arm_compute/graph2/backends/BackendRegistrar.h"
 #include "arm_compute/graph2/backends/NEON/NEFunctionFactory.h"
+#include "arm_compute/graph2/backends/NEON/NENodeValidator.h"
 #include "arm_compute/graph2/backends/NEON/NESubTensorHandle.h"
 #include "arm_compute/graph2/backends/NEON/NETensorHandle.h"
 
@@ -104,12 +105,12 @@
     return NEFunctionFactory::create(&node, ctx);
 }
 
-arm_compute::Status NEDeviceBackend::validate_node(const INode &node)
+arm_compute::Status NEDeviceBackend::validate_node(INode &node)
 {
     ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating NEON node with ID : " << node.id() << std::endl);
-    ARM_COMPUTE_UNUSED(node);
+    ARM_COMPUTE_ERROR_ON(node.assigned_target() != Target::NEON);
 
-    return Status{};
+    return NENodeValidator::validate(&node);
 }
 
 std::shared_ptr<arm_compute::IMemoryManager> NEDeviceBackend::create_memory_manager(MemoryManagerAffinity affinity)
diff --git a/src/graph2/backends/NEON/NENodeValidator.cpp b/src/graph2/backends/NEON/NENodeValidator.cpp
new file mode 100644
index 0000000..4620f4c
--- /dev/null
+++ b/src/graph2/backends/NEON/NENodeValidator.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph2/backends/NEON/NENodeValidator.h"
+
+#include "arm_compute/graph2/backends/ValidateHelpers.h"
+#include "arm_compute/graph2/nodes/Nodes.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+#include "arm_compute/runtime/NEON/NEFunctions.h"
+
+using namespace arm_compute::utils::cast;
+
+namespace arm_compute
+{
+namespace graph2
+{
+namespace backends
+{
+Status NENodeValidator::validate(INode *node)
+{
+    if(node == nullptr)
+    {
+        return Status{};
+    }
+
+    NodeType type = node->type();
+    switch(type)
+    {
+        case NodeType::ConvolutionLayer:
+            return detail::validate_convolution_layer<NEConvolutionLayer,
+                   NEDirectConvolutionLayer,
+                   NEGEMMConvolutionLayer,
+                   NEWinogradLayer>(*polymorphic_downcast<ConvolutionLayerNode *>(node));
+        case NodeType::DepthwiseConvolutionLayer:
+            return detail::validate_depthwise_convolution_layer<NEDepthwiseConvolutionLayer,
+                   NEDepthwiseConvolutionLayer3x3>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+
+        default:
+            return Status{};
+    }
+}
+} // namespace backends
+} // namespace graph2
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/graph2/detail/ExecutionHelpers.cpp b/src/graph2/detail/ExecutionHelpers.cpp
index a7eba0f..48588f1 100644
--- a/src/graph2/detail/ExecutionHelpers.cpp
+++ b/src/graph2/detail/ExecutionHelpers.cpp
@@ -75,6 +75,23 @@
     }
 }
 
+void validate_all_nodes(Graph &g)
+{
+    auto &nodes = g.nodes();
+
+    // Validate each node through its assigned backend
+    for(auto &node : nodes)
+    {
+        if(node != nullptr)
+        {
+            Target assigned_target = node->assigned_target();
+            auto   backend         = backends::BackendRegistry::get().find_backend(assigned_target);
+            ARM_COMPUTE_ERROR_ON_MSG(!backend, "Requested backend doesn't exist!");
+            backend->validate_node(*node);
+        }
+    }
+}
+
 ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx)
 {
     ExecutionWorkload workload;