IVGCVSW-5155 Update Arm NN API to allow for call to shape inference

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I0a2babe5b5b09eb81c9900dc3a05071034a0440b
diff --git a/include/armnn/BackendOptions.hpp b/include/armnn/BackendOptions.hpp
index d7ccbd4..44438b2 100644
--- a/include/armnn/BackendOptions.hpp
+++ b/include/armnn/BackendOptions.hpp
@@ -11,6 +11,8 @@
 namespace armnn
 {
 
+struct BackendOptions;
+using NetworkOptions = std::vector<BackendOptions>;
 
 /// Struct for the users to pass backend specific options
 struct BackendOptions
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index c0c52f9..6a143b0 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -4,6 +4,7 @@
 //
 #pragma once
 
+#include <armnn/BackendOptions.hpp>
 #include <armnn/Deprecated.hpp>
 #include <armnn/DescriptorsFwd.hpp>
 #include <armnn/ILayerVisitor.hpp>
@@ -11,7 +12,6 @@
 #include <armnn/Optional.hpp>
 #include <armnn/TensorFwd.hpp>
 #include <armnn/Types.hpp>
-#include <armnn/Deprecated.hpp>
 
 #include <memory>
 #include <vector>
@@ -105,8 +105,8 @@
 class INetwork
 {
 public:
-    static INetwork* CreateRaw();
-    static INetworkPtr Create();
+    static INetwork* CreateRaw(NetworkOptions networkOptions = {});
+    static INetworkPtr Create(NetworkOptions networkOptions = {});
     static void Destroy(INetwork* network);
 
     virtual Status PrintGraph() = 0;
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index cc33847..2a60072 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -489,7 +489,7 @@
     subgraph.Clear();
 }
 
-void Graph::InferTensorInfos(ShapeInferenceMethod shapeInferenceMethod)
+void Graph::InferTensorInfos()
 {
     for (auto&& layer : TopologicalSort())
     {
@@ -511,8 +511,12 @@
             {
                 throw LayerValidationException("All inputs must have the TensorInfo set at this point.");
             }
+
+            if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
+            {
+                layer->ValidateTensorShapesFromInputs();
+            }
         }
-        layer->ValidateTensorShapesFromInputs(shapeInferenceMethod);
     }
 }
 
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index 9673df4..87e0da8 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -93,7 +93,11 @@
         const Graph& m_Graph;
     };
 
-    Graph() : m_LayersInOrder(true) {}
+    explicit Graph(bool shapeInferenceMethod = false)
+        : m_LayersInOrder(true)
+        , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
+                                                        ShapeInferenceMethod::ValidateOnly)
+        {}
 
     Graph(const Graph& other);
 
@@ -200,7 +204,7 @@
     void SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer);
     void SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph);
 
-    void InferTensorInfos(ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly);
+    void InferTensorInfos();
 
     void AttachObservable(IGraphObservable* const observable, GraphEvent notifyOnEvent) {
         m_Views[notifyOnEvent].emplace_back(observable);
@@ -260,6 +264,7 @@
     mutable bool m_LayersInOrder;
 
     std::map<const GraphEvent, std::list<IGraphObservable*>> m_Views;
+    ShapeInferenceMethod m_ShapeInferenceMethod;
 };
 
 /// Common base class for layers in the graph.
@@ -401,6 +406,8 @@
         ((LayerEnumOf<LayerT>() == LayerType::Input) || (LayerEnumOf<LayerT>() == LayerType::Output));
     LayerT* const layer = new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
 
+    layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
+
     NotifyObservables(GraphEvent::LayerAdded, layer);
 
     return layer;
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 692ee32..dc211b7 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "Layer.hpp"
@@ -67,6 +67,10 @@
 
 bool OutputSlot::IsTensorInfoSet() const
 {
+    if (GetOwningLayer().GetShapeInferenceMethod() == ShapeInferenceMethod::InferAndValidate)
+    {
+        GetOwningLayer().ValidateTensorShapesFromInputs();
+    }
     return GetOutputHandler().IsTensorInfoSet();
 }
 
@@ -191,6 +195,7 @@
              DataLayout layout,
              const char* name)
 : m_OutputHandlers(numOutputSlots)
+, m_ShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly)
 , m_LayerName(name ? name : "")
 , m_Type(type)
 , m_BackendId()
@@ -354,18 +359,6 @@
                         % GetNameStr()
                         % location.AsString()));
         }
-        if(! GetInputSlot(i).GetConnection()->IsTensorInfoSet())
-        {
-            throw LayerValidationException(
-                boost::str(
-                    boost::format(
-                        "TensorInfo of Input connection #%1% must be set on connected OutputSlot for "
-                        "%2% layer %3% %4%")
-                        % i
-                        % GetLayerTypeAsCString(this->GetType())
-                        % GetNameStr()
-                        % location.AsString()));
-        }
     }
 }
 
@@ -448,15 +441,6 @@
                 outputShape.AreAllDimensionsSpecified(),
                 "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
     }
-    else
-    {
-        if (outputShape.GetDimensionality() == Dimensionality::Specified)
-        {
-            ConditionalThrow<LayerValidationException>(
-                    !outputShape.AreAllDimensionsSpecified(),
-                    "No unspecified dimension while using ShapeInferenceMethod::InferAndValidate");
-        }
-    }
 }
 
 void Layer::SerializeLayerParameters(ParameterStringifyFunction& fn) const
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index f1954b9..d4a24e4 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -228,6 +228,8 @@
         return const_cast<OutputHandler&>(const_cast<const Layer*>(this)->GetOutputHandler(i));
     }
 
+    ShapeInferenceMethod GetShapeInferenceMethod() const { return m_ShapeInferenceMethod; }
+
     const std::vector<InputSlot>& GetInputSlots() const { return m_InputSlots; }
     const std::vector<OutputSlot>& GetOutputSlots() const { return m_OutputSlots; }
 
@@ -277,8 +279,7 @@
 
     void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const;
 
-    virtual void ValidateTensorShapesFromInputs(
-                    ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) = 0;
+    virtual void ValidateTensorShapesFromInputs() = 0;
 
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
@@ -328,6 +329,11 @@
     }
     Optional<BackendId> GetBackendHint() const { return m_BackendHint; }
 
+    void SetShapeInferenceMethod(ShapeInferenceMethod shapeInferenceMethod)
+    {
+        m_ShapeInferenceMethod = shapeInferenceMethod;
+    }
+
 protected:
     // Graph needs access to the virtual destructor.
     friend class Graph;
@@ -378,6 +384,7 @@
 
 protected:
     std::vector<OutputHandler> m_OutputHandlers;
+    ShapeInferenceMethod m_ShapeInferenceMethod;
 
 private:
     const std::string m_LayerName;
@@ -396,6 +403,7 @@
     LayerGuid m_Guid;
 
     std::list<std::string> m_RelatedLayerNames;
+
 };
 
 // A layer user-provided data can be bound to (e.g. inputs, outputs).
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index e0607bd..132924a 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -42,14 +42,14 @@
 namespace armnn
 {
 
-armnn::INetwork* INetwork::CreateRaw()
+armnn::INetwork* INetwork::CreateRaw(NetworkOptions networkOptions)
 {
-    return new Network();
+    return new Network(networkOptions);
 }
 
-armnn::INetworkPtr INetwork::Create()
+armnn::INetworkPtr INetwork::Create(NetworkOptions networkOptions)
 {
-    return INetworkPtr(CreateRaw(), &INetwork::Destroy);
+    return INetworkPtr(CreateRaw(networkOptions), &INetwork::Destroy);
 }
 
 void INetwork::Destroy(INetwork* network)
@@ -1147,11 +1147,19 @@
 
     return optNet;
 }
-
-Network::Network()
-: m_Graph(std::make_unique<Graph>())
+bool Network::GetShapeInferenceMethod()
 {
+    if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
+    {
+        return m_NetworkOptions[0].GetOption(0).GetValue().AsBool();
+    }
+
+    return false;
 }
+Network::Network(NetworkOptions networkOptions)
+: m_NetworkOptions(networkOptions),
+  m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
+{}
 
 Network::~Network()
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 6bc0ac7..77d6b04 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -28,7 +28,7 @@
 class Network final : public INetwork
 {
 public:
-    Network();
+    Network(NetworkOptions networkOptions = {});
     ~Network();
 
     const Graph& GetGraph() const { return *m_Graph; }
@@ -269,6 +269,9 @@
         const Optional<ConstTensor>& biases,
         const char* name);
 
+    bool GetShapeInferenceMethod();
+    NetworkOptions m_NetworkOptions;
+
     std::unique_ptr<Graph> m_Graph;
 };
 
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index ccee524..e04fcbb 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -30,18 +30,18 @@
     return CloneBase<AbsLayer>(graph, GetName());
 }
 
-void AbsLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void AbsLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "AbsLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "AbsLayer");
 }
 
 void AbsLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/AbsLayer.hpp b/src/armnn/layers/AbsLayer.hpp
index ab31014..0e5ccb0 100644
--- a/src/armnn/layers/AbsLayer.hpp
+++ b/src/armnn/layers/AbsLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -26,8 +26,7 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref AbsLayer.
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index c6443b6..d3d02c3 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -28,19 +28,19 @@
     return CloneBase<ActivationLayer>(graph, m_Param, GetName());
 }
 
-void ActivationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ActivationLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ActivationLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ActivationLayer");
 }
 
 void ActivationLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ActivationLayer.hpp b/src/armnn/layers/ActivationLayer.hpp
index 3f0d520..5ffcc3e 100644
--- a/src/armnn/layers/ActivationLayer.hpp
+++ b/src/armnn/layers/ActivationLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -24,8 +24,7 @@
 
     /// Check if the input tensor shape(s) will lead to a valid configuration of @ref ActivationLayer.
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/AdditionLayer.hpp b/src/armnn/layers/AdditionLayer.hpp
index 4c80e5c..4af576a 100644
--- a/src/armnn/layers/AdditionLayer.hpp
+++ b/src/armnn/layers/AdditionLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index e288d16..bd914ec 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -69,19 +69,19 @@
     return std::vector<TensorShape>({ outputShape });
 }
 
-void ArgMinMaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ArgMinMaxLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ArgMinMaxLayer");
 }
 
 void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
index 27cfb20..761d4a0 100644
--- a/src/armnn/layers/ArgMinMaxLayer.hpp
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -32,8 +32,7 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref ArgMinMaxLayer.
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index fa589db..625e0d4 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -48,19 +48,19 @@
     return std::move(layer);
 }
 
-void BatchNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void BatchNormalizationLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "BatchNormalizationLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchNormalizationLayer");
 
 }
 
diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp
index 88db81f..3915897 100644
--- a/src/armnn/layers/BatchNormalizationLayer.hpp
+++ b/src/armnn/layers/BatchNormalizationLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -37,8 +37,7 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref BatchNormalizationLayer.
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index fe99e9e..1a5cfa6 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -41,19 +41,19 @@
     return std::move(layer);
 }
 
-void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape &outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "BatchToSpaceNdLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchToSpaceNdLayer");
 }
 
 std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp
index b99dc36..da7585b 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.hpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,7 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref BatchToSpaceNdLayer.
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index 4dd3781..a9639e8 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -55,13 +55,13 @@
     return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
 }
 
-void ComparisonLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ComparisonLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(2, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
@@ -69,7 +69,7 @@
     });
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ComparisonLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ComparisonLayer");
 }
 
 void ComparisonLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ComparisonLayer.hpp b/src/armnn/layers/ComparisonLayer.hpp
index e20bcdf..bcb0dc2 100644
--- a/src/armnn/layers/ComparisonLayer.hpp
+++ b/src/armnn/layers/ComparisonLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,8 +33,7 @@
     /// Check if the input tensor shape(s) will lead to a valid configuration
     /// of @ref ComparisonLayer
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 4f0aa53..d9fffff 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -242,7 +242,7 @@
     return std::vector<TensorShape>({ TensorShape({numDims, extentMax.data()}) });
 }
 
-void ConcatLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConcatLayer::ValidateTensorShapesFromInputs()
 {
     // Validates Concat layer.
     ConditionalThrowIfNotEqual<LayerValidationException>(
@@ -254,7 +254,7 @@
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     std::vector<TensorShape> inputShapes;
     for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
@@ -266,7 +266,7 @@
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConcatLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
 }
 
 void ConcatLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index 5bb11ba..84eba2e 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -35,8 +35,7 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref ConcatLayer.
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index ff4c57c..cd8a056 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -40,9 +40,8 @@
     return std::vector<TensorShape>({  inputShapes[0] });
 }
 
-void ConstantLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConstantLayer::ValidateTensorShapesFromInputs()
 {
-    IgnoreUnused(shapeInferenceMethod);
 
     // Get the output shape from the value of the constant layer.
     TensorShape const& outShape = m_LayerOutput->GetTensorInfo().GetShape();
diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp
index 23183d2..36fa1f9 100644
--- a/src/armnn/layers/ConstantLayer.hpp
+++ b/src/armnn/layers/ConstantLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -28,8 +28,7 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref ConstantLayer
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
index 250ecfa..81bb4d9 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -30,19 +30,19 @@
     return CloneBase<ConvertBf16ToFp32Layer>(graph, GetName());
 }
 
-void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertBf16ToFp32Layer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertBf16ToFp32Layer");
 }
 
 void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
index 136cfed..d9df0bd 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -26,8 +26,7 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref ConvertBf16ToFp32Layer.
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index f86397f..709ca13 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -30,19 +30,19 @@
     return CloneBase<ConvertFp16ToFp32Layer>(graph, GetName());
 }
 
-void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertFp16ToFp32Layer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertFp16ToFp32Layer");
 }
 
 void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
index c4ac13b..4eadb9f 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -26,8 +26,7 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref ConvertFp16ToFp32Layer.
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
index 1505245..9b02b2f 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
@@ -30,20 +30,20 @@
     return CloneBase<ConvertFp32ToBf16Layer>(graph, GetName());
 }
 
-void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs()
 {
 
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LayerName");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
 }
 
 void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
index 096dc7e..57fbe13 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref ConvertFp32ToBf16Layer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 0a126e2..7b2df00 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -29,20 +29,20 @@
     return CloneBase<ConvertFp32ToFp16Layer>(graph, GetName());
 }
 
-void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs()
 {
 
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LayerName");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
 }
 
 void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
index c8a5055..5652a47 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -25,8 +25,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref ConvertFp32ToFp16Layer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index a1535ea..5fff982 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -112,13 +112,13 @@
     return std::vector<TensorShape>({ tensorShape });
 }
 
-void Convolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void Convolution2dLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     // check if we m_Weight data is not nullptr
     ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
@@ -129,7 +129,7 @@
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "Convolution2dLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution2dLayer");
 }
 
 Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp
index e88b44d..4dd1497 100644
--- a/src/armnn/layers/Convolution2dLayer.hpp
+++ b/src/armnn/layers/Convolution2dLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -34,8 +34,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref Convolution2dLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 40bc8fe..c29421f 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -34,20 +34,20 @@
     return CloneBase<DebugLayer>(graph, GetName());
 }
 
-void DebugLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DebugLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DebugLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DebugLayer");
 }
 
 void DebugLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp
index 227e056..e71e05a 100644
--- a/src/armnn/layers/DebugLayer.hpp
+++ b/src/armnn/layers/DebugLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref DebugLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index 4b7c41d..dae557e 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -57,20 +57,20 @@
     return std::vector<TensorShape>({ outputShape });
 }
 
-void DepthToSpaceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DepthToSpaceLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DepthToSpaceLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthToSpaceLayer");
 }
 
 void DepthToSpaceLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/DepthToSpaceLayer.hpp b/src/armnn/layers/DepthToSpaceLayer.hpp
index a0ecdcf..0730d4d 100644
--- a/src/armnn/layers/DepthToSpaceLayer.hpp
+++ b/src/armnn/layers/DepthToSpaceLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,8 +33,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref DepthToSpaceLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index 98d9e82..8a7cf23 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -119,13 +119,13 @@
     return std::vector<TensorShape>{ tensorShape };
 }
 
-void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     // on this level constant data should not be released..
     ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
@@ -137,7 +137,7 @@
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DepthwiseConvolution2dLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthwiseConvolution2dLayer");
 }
 
 Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
index 7b42a5f..dd0b0e6 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -33,8 +33,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref DepthwiseConvolution2dLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index 79ef0cb..f798882 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -29,20 +29,20 @@
     return CloneBase<DequantizeLayer>(graph, GetName());
 }
 
-void DequantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DequantizeLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DequantizeLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DequantizeLayer");
 }
 
 void DequantizeLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp
index 24c9869..a5750dd 100644
--- a/src/armnn/layers/DequantizeLayer.hpp
+++ b/src/armnn/layers/DequantizeLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref DequantizeLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index fddf86f..b18781b 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -34,13 +34,13 @@
     return std::move(layer);
 }
 
-void DetectionPostProcessLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DetectionPostProcessLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(2, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     // on this level constant data should not be released.
     ARMNN_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null.");
@@ -53,21 +53,21 @@
     const TensorShape& inferredDetectionScores = TensorShape({ 1, detectedBoxes });
     const TensorShape& inferredNumberDetections = TensorShape({ 1 });
 
-    ValidateAndCopyShape(outputShape, inferredDetectionBoxes, shapeInferenceMethod, "DetectionPostProcessLayer");
+    ValidateAndCopyShape(outputShape, inferredDetectionBoxes, m_ShapeInferenceMethod, "DetectionPostProcessLayer");
 
     ValidateAndCopyShape(GetOutputSlot(1).GetTensorInfo().GetShape(),
                          inferredDetectionScores,
-                         shapeInferenceMethod,
+                         m_ShapeInferenceMethod,
                          "DetectionPostProcessLayer", 1);
 
     ValidateAndCopyShape(GetOutputSlot(2).GetTensorInfo().GetShape(),
                          inferredDetectionScores,
-                         shapeInferenceMethod,
+                         m_ShapeInferenceMethod,
                          "DetectionPostProcessLayer", 2);
 
     ValidateAndCopyShape(GetOutputSlot(3).GetTensorInfo().GetShape(),
                          inferredNumberDetections,
-                         shapeInferenceMethod,
+                         m_ShapeInferenceMethod,
                          "DetectionPostProcessLayer", 3);
 }
 
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index d3c604f..374eef5 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -32,8 +32,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref DetectionPostProcessLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/DivisionLayer.hpp b/src/armnn/layers/DivisionLayer.hpp
index 0a9b9fe..4427a4c 100644
--- a/src/armnn/layers/DivisionLayer.hpp
+++ b/src/armnn/layers/DivisionLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp
index e063293..b4a3cea 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.cpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.cpp
@@ -47,13 +47,13 @@
     return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
 }
 
-void ElementwiseBaseLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ElementwiseBaseLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(2, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
@@ -62,7 +62,7 @@
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, GetLayerTypeAsCString(GetType()));
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
 }
 
 } // namespace armnn
diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp
index 1f9888a..3893dcd 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.hpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -19,8 +19,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of the element wise operation.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index 5592c20..cf4c2fc 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -40,19 +40,19 @@
     return std::vector<TensorShape>({ input });
 }
 
-void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, GetLayerTypeAsCString(GetType()));
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
 }
 
 void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.hpp b/src/armnn/layers/ElementwiseUnaryLayer.hpp
index ae88fcf..f6f8862 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.hpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -32,8 +32,6 @@
     /// Check if the input tensor shape(s) will lead to a valid configuration
     /// of @ref ElementwiseUnaryLayer
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 3a1d0d1..ab41324 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -29,19 +29,19 @@
     return CloneBase<FakeQuantizationLayer>(graph, m_Param, GetName());
 }
 
-void FakeQuantizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FakeQuantizationLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FakeQuantizationLayer");
 }
 
 void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp
index dc22c23..09bd530 100644
--- a/src/armnn/layers/FakeQuantizationLayer.hpp
+++ b/src/armnn/layers/FakeQuantizationLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref FakeQuantizationLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index 174fcf7..329a30a 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -29,13 +29,13 @@
     return CloneBase<FillLayer>(graph, m_Param, GetName());
 }
 
-void FillLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void FillLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
diff --git a/src/armnn/layers/FillLayer.hpp b/src/armnn/layers/FillLayer.hpp
index aa12fca..eeed141 100644
--- a/src/armnn/layers/FillLayer.hpp
+++ b/src/armnn/layers/FillLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -25,8 +25,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref FillLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index 04d847e..5ff9a9a 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -29,18 +29,18 @@
     return CloneBase<FloorLayer>(graph, GetName());
 }
 
-void FloorLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void FloorLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FloorLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FloorLayer");
 }
 
 void FloorLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/FloorLayer.hpp b/src/armnn/layers/FloorLayer.hpp
index 68361d0..07cf151 100644
--- a/src/armnn/layers/FloorLayer.hpp
+++ b/src/armnn/layers/FloorLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref FloorLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 174459b..f10beda 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -61,13 +61,11 @@
     return std::vector<TensorShape>({ TensorShape({batches, weightShape[dimIdx]})});
 }
 
-void FullyConnectedLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void FullyConnectedLayer::ValidateTensorShapesFromInputs()
 {
-    IgnoreUnused(shapeInferenceMethod);
-
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     // check if we m_Weight data is not nullptr
     ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
@@ -78,7 +76,7 @@
     ARMNN_ASSERT(inferredShapes.size() == 1);
     ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FullyConnectedLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FullyConnectedLayer");
 }
 
 Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp
index a2d0750..bbacd25 100644
--- a/src/armnn/layers/FullyConnectedLayer.hpp
+++ b/src/armnn/layers/FullyConnectedLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -33,8 +33,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref FullyConnectedLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index 52bf432..e5d4a18 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -29,13 +29,13 @@
     return CloneBase<GatherLayer>(graph, m_Param, GetName());
 }
 
-void GatherLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void GatherLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(2, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     const TensorInfo& params = GetInputSlot(0).GetConnection()->GetTensorInfo();
     const TensorInfo& indices = GetInputSlot(1).GetConnection()->GetTensorInfo();
@@ -68,7 +68,7 @@
 
     const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());
 
-    ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "GatherLayer");
+    ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "GatherLayer");
 }
 
 void GatherLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/GatherLayer.hpp b/src/armnn/layers/GatherLayer.hpp
index d8737ad..010af37 100644
--- a/src/armnn/layers/GatherLayer.hpp
+++ b/src/armnn/layers/GatherLayer.hpp
@@ -27,8 +27,6 @@
     /// Check if the input tensor shape(s).
     /// will lead to a valid configuration of @ref GatherLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index 5a78ecc..0f96611 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -28,12 +28,10 @@
     return CloneBase<InputLayer>(graph, GetBindingId(), GetName());
 }
 
-void InputLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void InputLayer::ValidateTensorShapesFromInputs()
 {
-    IgnoreUnused(shapeInferenceMethod);
-
     //The input layer should already have it's inputs set during graph building phase in the driver/parser.
-    ConditionalThrow<LayerValidationException>(GetOutputSlot(0).IsTensorInfoSet(),
+    ConditionalThrow<LayerValidationException>(GetOutputHandler(0).IsTensorInfoSet(),
                                                "InputLayer should already have the TensorInfo set.");
 }
 
diff --git a/src/armnn/layers/InputLayer.hpp b/src/armnn/layers/InputLayer.hpp
index 430abcb..ff6b521 100644
--- a/src/armnn/layers/InputLayer.hpp
+++ b/src/armnn/layers/InputLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref InputLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index 4f753e2..eb6fe90 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -29,19 +29,19 @@
     return CloneBase<InstanceNormalizationLayer>(graph, m_Param, GetName());
 }
 
-void InstanceNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void InstanceNormalizationLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "InstanceNormalizationLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "InstanceNormalizationLayer");
 }
 
 void InstanceNormalizationLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/InstanceNormalizationLayer.hpp b/src/armnn/layers/InstanceNormalizationLayer.hpp
index affc028..799cf28 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.hpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref InstanceNormalizationLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index d4ac801..ab2b094 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -29,19 +29,19 @@
     return CloneBase<L2NormalizationLayer>(graph, m_Param, GetName());
 }
 
-void L2NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void L2NormalizationLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "L2NormalizationLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "L2NormalizationLayer");
 }
 
 void L2NormalizationLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/L2NormalizationLayer.hpp b/src/armnn/layers/L2NormalizationLayer.hpp
index 1c7e483..5d58077 100644
--- a/src/armnn/layers/L2NormalizationLayer.hpp
+++ b/src/armnn/layers/L2NormalizationLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref L2NormalizationLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/LayerCloneBase.hpp b/src/armnn/layers/LayerCloneBase.hpp
index 3671d66..a8ff52b 100644
--- a/src/armnn/layers/LayerCloneBase.hpp
+++ b/src/armnn/layers/LayerCloneBase.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -17,6 +17,7 @@
 
     layer->SetBackendId(GetBackendId());
     layer->SetGuid(GetGuid());
+    layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
 
     return layer;
 }
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index f834cce..1620acb 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -29,18 +29,18 @@
     return CloneBase<LogSoftmaxLayer>(graph, m_Param, GetName());
 }
 
-void LogSoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void LogSoftmaxLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LogSoftmaxLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogSoftmaxLayer");
 }
 
 void LogSoftmaxLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/LogSoftmaxLayer.hpp b/src/armnn/layers/LogSoftmaxLayer.hpp
index a1907b9..b21bece 100644
--- a/src/armnn/layers/LogSoftmaxLayer.hpp
+++ b/src/armnn/layers/LogSoftmaxLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -27,8 +27,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref LogSoftmaxLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 44f5d1f..724bd6b 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -163,13 +163,13 @@
     return outShapes;
 }
 
-void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void LstmLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(3, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes( {
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
@@ -208,7 +208,7 @@
         ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
                          "LstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
 
-        ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LstmLayer");
+        ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LstmLayer");
     }
     else
     {
@@ -219,7 +219,7 @@
         ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
             "LstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
 
-        ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LstmLayer");
+        ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LstmLayer");
     }
 
     if (m_Param.m_ProjectionEnabled)
@@ -243,11 +243,11 @@
     }
 
     ValidateAndCopyShape(
-            GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "LstmLayer", 1);
+            GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "LstmLayer", 1);
     ValidateAndCopyShape(
-            GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], shapeInferenceMethod, "LstmLayer", 2);
+            GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], m_ShapeInferenceMethod, "LstmLayer", 2);
     ValidateAndCopyShape(
-            GetOutputSlot(3).GetTensorInfo().GetShape(), inferredShapes[3], shapeInferenceMethod, "LstmLayer", 3);
+            GetOutputSlot(3).GetTensorInfo().GetShape(), inferredShapes[3], m_ShapeInferenceMethod, "LstmLayer", 3);
 
     if (m_Param.m_LayerNormEnabled)
     {
diff --git a/src/armnn/layers/LstmLayer.hpp b/src/armnn/layers/LstmLayer.hpp
index c7e4dd4..51348d7 100644
--- a/src/armnn/layers/LstmLayer.hpp
+++ b/src/armnn/layers/LstmLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -97,8 +97,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref LstmLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/MaximumLayer.hpp b/src/armnn/layers/MaximumLayer.hpp
index c90a30e..743f79b 100644
--- a/src/armnn/layers/MaximumLayer.hpp
+++ b/src/armnn/layers/MaximumLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index da63566..a1a3a40 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -38,13 +38,13 @@
     return std::move(layer);
 }
 
-void MeanLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void MeanLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
 
@@ -97,7 +97,7 @@
     }
     const TensorShape& inferredShape = TensorShape(outputRank, dimSizes.data());
 
-    ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "MeanLayer");
+    ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "MeanLayer");
 }
 
 void MeanLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
index 3aacd59..3a094bf 100644
--- a/src/armnn/layers/MeanLayer.hpp
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -27,8 +27,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref MeanLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index b4fe68b..854b4f6 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -33,19 +33,19 @@
     return std::make_unique<CopyMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor));
 }
 
-void MemCopyLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void MemCopyLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MemCopyLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemCopyLayer");
 }
 
 void MemCopyLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp
index 10a9f55..996d687 100644
--- a/src/armnn/layers/MemCopyLayer.hpp
+++ b/src/armnn/layers/MemCopyLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref MemCopyLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index a356f05..d9148fb 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -33,19 +33,19 @@
     return std::make_unique<ImportMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor));
 }
 
-void MemImportLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void MemImportLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MemImportLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemImportLayer");
 }
 
 void MemImportLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp
index 9b9c888..1cbdaac 100644
--- a/src/armnn/layers/MemImportLayer.hpp
+++ b/src/armnn/layers/MemImportLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref MemImportLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index d8351c6..74a31a8 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -27,13 +27,13 @@
     return CloneBase<MergeLayer>(graph, GetName());
 }
 
-void MergeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void MergeLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(2, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
@@ -42,7 +42,7 @@
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MergeLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MergeLayer");
 }
 
 std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
diff --git a/src/armnn/layers/MergeLayer.hpp b/src/armnn/layers/MergeLayer.hpp
index 3d0cf52..07f6900 100644
--- a/src/armnn/layers/MergeLayer.hpp
+++ b/src/armnn/layers/MergeLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref MergeLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// Infers the output shapes from given input shapes.
     /// @param [in] inputShapes The input shapes layer has.
diff --git a/src/armnn/layers/MergerLayer.hpp b/src/armnn/layers/MergerLayer.hpp
index 3271060..8309d31 100644
--- a/src/armnn/layers/MergerLayer.hpp
+++ b/src/armnn/layers/MergerLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
diff --git a/src/armnn/layers/MinimumLayer.hpp b/src/armnn/layers/MinimumLayer.hpp
index 03ca031..2db0629 100644
--- a/src/armnn/layers/MinimumLayer.hpp
+++ b/src/armnn/layers/MinimumLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
diff --git a/src/armnn/layers/MultiplicationLayer.hpp b/src/armnn/layers/MultiplicationLayer.hpp
index 752765b..692f407 100644
--- a/src/armnn/layers/MultiplicationLayer.hpp
+++ b/src/armnn/layers/MultiplicationLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index e8176ce..b75bb33 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -29,19 +29,19 @@
     return CloneBase<NormalizationLayer>(graph, m_Param, GetName());
 }
 
-void NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void NormalizationLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "NormalizationLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "NormalizationLayer");
 }
 
 void NormalizationLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/NormalizationLayer.hpp b/src/armnn/layers/NormalizationLayer.hpp
index 25787a8..00a4435 100644
--- a/src/armnn/layers/NormalizationLayer.hpp
+++ b/src/armnn/layers/NormalizationLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref NormalizationLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index db76244..d14337f 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -29,9 +29,7 @@
     return CloneBase<OutputLayer>(graph, GetBindingId(), GetName());
 }
 
-void OutputLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void OutputLayer::ValidateTensorShapesFromInputs()
 {
-    IgnoreUnused(shapeInferenceMethod);
-
     // Just validates that the input is connected.
     ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr,
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 26c5a0a..6315e25 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -38,8 +38,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref OutputLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 296bfa9..4fcbc77 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -36,9 +36,7 @@
     return std::move(layer);
 }
 
-void PadLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void PadLayer::ValidateTensorShapesFromInputs()
 {
-    IgnoreUnused(shapeInferenceMethod);
-
     return;
 }
diff --git a/src/armnn/layers/PadLayer.hpp b/src/armnn/layers/PadLayer.hpp
index a15563d..16cdbf5 100644
--- a/src/armnn/layers/PadLayer.hpp
+++ b/src/armnn/layers/PadLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -27,8 +27,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref PadLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index b9380aa..3c4d1ee 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -40,19 +40,19 @@
     return std::vector<TensorShape> ({armnnUtils::Permuted(inShape, m_Param.m_DimMappings)});
 }
 
-void PermuteLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void PermuteLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "PermuteLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PermuteLayer");
 }
 
 void PermuteLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp
index 9af1d9b..67be2e1 100644
--- a/src/armnn/layers/PermuteLayer.hpp
+++ b/src/armnn/layers/PermuteLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -28,8 +28,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref PermuteLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index bed49ee..5411695 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -100,19 +100,19 @@
     return std::vector<TensorShape>({ tensorShape });
 }
 
-void Pooling2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void Pooling2dLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "Pooling2dLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling2dLayer");
 }
 
 void Pooling2dLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/Pooling2dLayer.hpp b/src/armnn/layers/Pooling2dLayer.hpp
index 2a5703b..90c9a44 100644
--- a/src/armnn/layers/Pooling2dLayer.hpp
+++ b/src/armnn/layers/Pooling2dLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref Pooling2dLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index e043fac..afc9877 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -35,9 +35,7 @@
     return factory.CreatePreCompiled(descriptor, PrepInfoAndDesc(descriptor));
 }
 
-void PreCompiledLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void PreCompiledLayer::ValidateTensorShapesFromInputs()
 {
-    IgnoreUnused(shapeInferenceMethod);
-
     // NOTE: since the PreCompiledLayer is an internal layer created from a valid SubgraphView,
     // we do not need to validate its input shapes
diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp
index a4b1c78..a4851c7 100644
--- a/src/armnn/layers/PreCompiledLayer.hpp
+++ b/src/armnn/layers/PreCompiledLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -29,8 +29,7 @@
 
     PreCompiledLayer* Clone(Graph &graph) const override;
 
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void SetPreCompiledObject(PreCompiledObjectPtr preCompiledObject);
 
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index fcf50f2..a57aa85 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -94,13 +94,13 @@
     return { outputShape };
 }
 
-void PreluLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void PreluLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(2, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes(
     {
@@ -110,7 +110,7 @@
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "PreluLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PreluLayer");
 }
 
 void PreluLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp
index 6febdf9..511be29 100644
--- a/src/armnn/layers/PreluLayer.hpp
+++ b/src/armnn/layers/PreluLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,8 +33,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref PreluLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 321d985..4d0d57c 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -165,13 +165,13 @@
     return outShapes;
 }
 
-void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void QLstmLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(3, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes(
     {
@@ -211,7 +211,7 @@
         ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
                 "QLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
 
-        ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QLstmLayer");
+        ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QLstmLayer");
     }
     else
     {
@@ -223,7 +223,7 @@
         ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
                 "QLstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
 
-        ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QLstmLayer");
+        ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QLstmLayer");
     }
 
     if (m_Param.m_ProjectionEnabled)
@@ -247,9 +247,9 @@
     }
 
     ValidateAndCopyShape(
-            GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "QLstmLayer", 1);
+            GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "QLstmLayer", 1);
     ValidateAndCopyShape(
-            GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], shapeInferenceMethod, "QLstmLayer", 2);
+            GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], m_ShapeInferenceMethod, "QLstmLayer", 2);
 
     if (m_Param.m_LayerNormEnabled)
     {
diff --git a/src/armnn/layers/QLstmLayer.hpp b/src/armnn/layers/QLstmLayer.hpp
index 0178933..5757ef6 100644
--- a/src/armnn/layers/QLstmLayer.hpp
+++ b/src/armnn/layers/QLstmLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -99,8 +99,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref QLstmLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index 990d2b4..aad6dd8 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -29,17 +29,17 @@
     return clone;
 }
 
-void QuantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void QuantizeLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QuantizeLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizeLayer");
 }
 
 void QuantizeLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/QuantizeLayer.hpp b/src/armnn/layers/QuantizeLayer.hpp
index a223f59..2f331a4 100644
--- a/src/armnn/layers/QuantizeLayer.hpp
+++ b/src/armnn/layers/QuantizeLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -21,8 +21,7 @@
 
     Layer* Clone(Graph& graph) const override;
 
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 432d50d..ad22761 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -91,13 +91,13 @@
     return outShapes;
 }
 
-void QuantizedLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void QuantizedLstmLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(3, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes(
     {
@@ -137,11 +137,11 @@
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null.");
 
     // Check output TensorShape(s) match inferred shape
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QuantizedLstmLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizedLstmLayer");
 
     ValidateAndCopyShape(GetOutputSlot(1).GetTensorInfo().GetShape(),
                          inferredShapes[1],
-                         shapeInferenceMethod,
+                         m_ShapeInferenceMethod,
                          "QuantizedLstmLayer",
                          1);
 }
diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp
index 1353a06..bfe86a4 100644
--- a/src/armnn/layers/QuantizedLstmLayer.hpp
+++ b/src/armnn/layers/QuantizedLstmLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -61,8 +61,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref QuantizedLstmLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 62f77df..2e70134 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -29,16 +29,15 @@
     return clone;
 }
 
-void RankLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void RankLayer::ValidateTensorShapesFromInputs()
 {
-    IgnoreUnused(shapeInferenceMethod);
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
     const TensorShape inferredShape = TensorShape(Dimensionality::Scalar);
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
-    ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "RankLayer");
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+    ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "RankLayer");
 }
 void RankLayer::Accept(ILayerVisitor& visitor) const
 {
diff --git a/src/armnn/layers/RankLayer.hpp b/src/armnn/layers/RankLayer.hpp
index e160d60..f4f1ec9 100644
--- a/src/armnn/layers/RankLayer.hpp
+++ b/src/armnn/layers/RankLayer.hpp
@@ -20,7 +20,7 @@
 
         Layer* Clone(Graph& graph) const override;
 
-        void ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) override;
+        void ValidateTensorShapesFromInputs() override;
 
         void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index 3f955a5..5265316 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -36,19 +36,19 @@
     return std::vector<TensorShape>({ m_Param.m_TargetShape });
 }
 
-void ReshapeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ReshapeLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ReshapeLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReshapeLayer");
 }
 
 void ReshapeLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp
index 4f0300a..78335e6 100644
--- a/src/armnn/layers/ReshapeLayer.hpp
+++ b/src/armnn/layers/ReshapeLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -28,8 +28,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref ReshapeLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index ab8430a..53af5f9 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -58,19 +58,19 @@
     return std::vector<TensorShape>({ tensorShape });
 }
 
-void ResizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ResizeLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ResizeLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ResizeLayer");
 }
 
 void ResizeLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp
index 0adda94..3462585 100644
--- a/src/armnn/layers/ResizeLayer.hpp
+++ b/src/armnn/layers/ResizeLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref ResizeLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index a68b5a4..e85d865 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -30,19 +30,19 @@
     return CloneBase<RsqrtLayer>(graph, GetName());
 }
 
-void RsqrtLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void RsqrtLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "RsqrtLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "RsqrtLayer");
 }
 
 void RsqrtLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/RsqrtLayer.hpp b/src/armnn/layers/RsqrtLayer.hpp
index d4183ef..4fcbf72 100644
--- a/src/armnn/layers/RsqrtLayer.hpp
+++ b/src/armnn/layers/RsqrtLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref RsqrtLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index a31f603..0f1d438 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -33,19 +33,19 @@
     return CloneBase<SliceLayer>(graph, m_Param, GetName());
 }
 
-void SliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SliceLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SliceLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SliceLayer");
 }
 
 std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
diff --git a/src/armnn/layers/SliceLayer.hpp b/src/armnn/layers/SliceLayer.hpp
index 3d9a7fe..0505a05 100644
--- a/src/armnn/layers/SliceLayer.hpp
+++ b/src/armnn/layers/SliceLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
    /// will lead to a valid configuration of @ref SliceLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index 3da2cb2..32d3a11 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -29,19 +29,19 @@
     return CloneBase<SoftmaxLayer>(graph, m_Param, GetName());
 }
 
-void SoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SoftmaxLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SoftmaxLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SoftmaxLayer");
 }
 
 void SoftmaxLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/SoftmaxLayer.hpp b/src/armnn/layers/SoftmaxLayer.hpp
index 84aae85..cbdd7c5 100644
--- a/src/armnn/layers/SoftmaxLayer.hpp
+++ b/src/armnn/layers/SoftmaxLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref SoftmaxLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index 4eba066..decb6e6 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -66,20 +66,20 @@
     return std::vector<TensorShape>({ outputShape });
 }
 
-void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SpaceToBatchNdLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToBatchNdLayer");
 }
 
 void SpaceToBatchNdLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.hpp b/src/armnn/layers/SpaceToBatchNdLayer.hpp
index 707017b..28857d8 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.hpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,8 +33,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref SpaceToBatchNdLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index 4695d81..72d8230 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -59,20 +59,20 @@
     return std::vector<TensorShape>({ outputShape });
 }
 
-void SpaceToDepthLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SpaceToDepthLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SpaceToDepthLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToDepthLayer");
 }
 
 void SpaceToDepthLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/SpaceToDepthLayer.hpp b/src/armnn/layers/SpaceToDepthLayer.hpp
index ca0d804..a8bc108 100644
--- a/src/armnn/layers/SpaceToDepthLayer.hpp
+++ b/src/armnn/layers/SpaceToDepthLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,8 +33,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref SpaceToDepthLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 60dc961..2d469b0 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -139,11 +139,11 @@
     return outShapes;
 }
 
-void SplitterLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SplitterLayer::ValidateTensorShapesFromInputs()
 {
     std::for_each(BeginOutputSlots(), EndOutputSlots(), [&](OutputSlot& outputSlot)
     {
-        VerifyShapeInferenceType(outputSlot.GetTensorInfo().GetShape(), shapeInferenceMethod);
+        VerifyShapeInferenceType(outputSlot.GetTensorInfo().GetShape(), m_ShapeInferenceMethod);
     });
 
     std::vector<TensorShape> views;
@@ -161,7 +161,7 @@
     {
         ValidateAndCopyShape(GetOutputSlot(viewIdx).GetTensorInfo().GetShape(),
                              inferredShapes[viewIdx],
-                             shapeInferenceMethod,
+                             m_ShapeInferenceMethod,
                              "SplitterLayer",
                              viewIdx);
     }
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index 39aab90..bd20890 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -36,8 +36,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref SplitterLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index f5d761b..7150576 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -58,7 +58,7 @@
     return std::vector<TensorShape>({ targetShape });
 }
 
-void StackLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void StackLayer::ValidateTensorShapesFromInputs()
 {
     // Validates Stack layer.
     ConditionalThrowIfNotEqual<LayerValidationException>(
@@ -70,7 +70,7 @@
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     // Constructs and validates input shapes
     std::vector<TensorShape> inputShapes;
@@ -90,7 +90,7 @@
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "StackLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StackLayer");
 }
 
 void StackLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp
index 5e937db..3d05da0 100644
--- a/src/armnn/layers/StackLayer.hpp
+++ b/src/armnn/layers/StackLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref StackLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// By default returns inputShapes if the number of inputs are equal to number of outputs,
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index 623f4a5..6281f3e 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -34,9 +34,7 @@
     throw Exception("Stand in layer does not support infering output shapes");
 }
 
-void StandInLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void StandInLayer::ValidateTensorShapesFromInputs()
 {
-    IgnoreUnused(shapeInferenceMethod);
-
     // Cannot validate this layer since no implementation details can be known by the framework
     // so do nothing here.
diff --git a/src/armnn/layers/StandInLayer.hpp b/src/armnn/layers/StandInLayer.hpp
index a7e4a2c..2864753 100644
--- a/src/armnn/layers/StandInLayer.hpp
+++ b/src/armnn/layers/StandInLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -27,8 +27,6 @@
     /// Check if the input tensor shape(s)
     /// Does nothing since cannot validate any properties of this layer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// Empty implementation that throws Exception if called.
     /// otherwise infers the output shapes from given input shapes and layer properties.
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index fc9df85..9b1706b 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -94,19 +94,19 @@
         TensorShape(boost::numeric_cast<unsigned int>(outputShape.size()), &outputShape[0]) });
 }
 
-void StridedSliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void StridedSliceLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "StridedSliceLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StridedSliceLayer");
 }
 
 void StridedSliceLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/StridedSliceLayer.hpp b/src/armnn/layers/StridedSliceLayer.hpp
index f9ba7e2..35ac370 100644
--- a/src/armnn/layers/StridedSliceLayer.hpp
+++ b/src/armnn/layers/StridedSliceLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -32,8 +32,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref StridedSliceLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/SubtractionLayer.hpp b/src/armnn/layers/SubtractionLayer.hpp
index 21619f8..527b50b 100644
--- a/src/armnn/layers/SubtractionLayer.hpp
+++ b/src/armnn/layers/SubtractionLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index b763b08..d905f52 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -27,13 +27,13 @@
     return CloneBase<SwitchLayer>(graph, GetName());
 }
 
-void SwitchLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SwitchLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(2, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     ARMNN_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs.");
 
@@ -44,10 +44,10 @@
 
     ARMNN_ASSERT(inferredShapes.size() == 2);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SwitchLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SwitchLayer");
 
     ValidateAndCopyShape(
-            GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "SwitchLayer", 1);
+            GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "SwitchLayer", 1);
 }
 
 void SwitchLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/SwitchLayer.hpp b/src/armnn/layers/SwitchLayer.hpp
index 7022348..025f379 100644
--- a/src/armnn/layers/SwitchLayer.hpp
+++ b/src/armnn/layers/SwitchLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,8 +26,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref SwitchLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     void Accept(ILayerVisitor& visitor) const override;
 
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 7074be9..9287389 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -101,13 +101,13 @@
     return std::vector<TensorShape>({ tensorShape });
 }
 
-void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
 
@@ -127,7 +127,7 @@
 
     ARMNN_ASSERT(expectedOutputShape.size() == 1);
 
-    ValidateAndCopyShape(outputShape, expectedOutputShape[0], shapeInferenceMethod, "TransposeConvolution2dLayer");
+    ValidateAndCopyShape(outputShape, expectedOutputShape[0], m_ShapeInferenceMethod, "TransposeConvolution2dLayer");
 }
 
 Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef()
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp
index ecdf7dc..1ee984d 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.hpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -33,8 +33,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref TransposeConvolution2dLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// Infers the output shapes from given input shapes and layer properties.
     /// @param [in] inputShapes The input shapes the layer has.
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 7dfb003..61e6863 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -40,19 +40,19 @@
     return std::vector<TensorShape> ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)});
 }
 
-void TransposeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void TransposeLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
 
-    VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
 
-    ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "TransposeLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TransposeLayer");
 }
 
 void TransposeLayer::Accept(ILayerVisitor& visitor) const
diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp
index 3e94a9f..a424524 100644
--- a/src/armnn/layers/TransposeLayer.hpp
+++ b/src/armnn/layers/TransposeLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -27,8 +27,6 @@
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref TransposeLayer.
-    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
-    void ValidateTensorShapesFromInputs(
-            ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+    void ValidateTensorShapesFromInputs() override;
 
     /// Infers the output shapes from given input shapes and the permutation vector.
     /// @param [in] inputShapes The input shapes layer has.
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index 21df1f0..25b0fea 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -60,7 +60,7 @@
 
     const unsigned int outputSize = layer->GetNumOutputSlots();
 
-    const auto runTestWithMask = [&](const bool maskPermutations[], ShapeInferenceMethod shapeInferenceMethod)
+    const auto runTestWithMask = [&](const bool maskPermutations[])
     {
         for (unsigned int i = 0; i < outputSize; ++i)
         {
@@ -68,7 +68,7 @@
                                                   DataType::Float32});
         }
 
-        layer->ValidateTensorShapesFromInputs(shapeInferenceMethod);
+        layer->ValidateTensorShapesFromInputs();
 
         for (unsigned int i = 0; i < outputSize; ++i)
         {
@@ -82,10 +82,12 @@
         layer->GetOutputSlot(j).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
     }
 
-    BOOST_CHECK_THROW(
-            layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly), LayerValidationException);
+    layer->SetShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly);
 
-    layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::InferAndValidate);
+    BOOST_CHECK_THROW(layer->ValidateTensorShapesFromInputs(), LayerValidationException);
+
+    layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
+    layer->ValidateTensorShapesFromInputs();
 
     for (unsigned int i = 0; i < outputSize; ++i)
     {
@@ -93,16 +95,13 @@
     }
 
     // Test inference with Dimensionality::Specified and various combinations of dimensions of unknown size
-    for (unsigned int i = 0; i <= numDimensions[0]; ++i)
+    for (unsigned int i = 0; i < numDimensions[0]; ++i)
     {
-        runTestWithMask(maskPermutations[i], ShapeInferenceMethod::InferAndValidate);
+        runTestWithMask(maskPermutations[i]);
     }
 
     // maskPermutations[5] equates to all dimensions being known
-    runTestWithMask(maskPermutations[5], ShapeInferenceMethod::ValidateOnly);
-
-    BOOST_CHECK_THROW(
-            runTestWithMask(maskPermutations[5], ShapeInferenceMethod::InferAndValidate), LayerValidationException);
+    runTestWithMask(maskPermutations[5]);
 }
 
 template<typename LayerT, typename... Args>
@@ -110,13 +109,68 @@
                            const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists,
                            Args &&... args)
 {
-    Graph graph;
+    Graph graph(true);
 
     auto layer = BuildGraph<LayerT>(&graph, inputShapes, std::forward<Args>(args)...);
 
     RunShapeInferenceTest<LayerT>(layer, dimensionSizeLists);
 }
 
+BOOST_AUTO_TEST_CASE(NetworkOptionsTest)
+{
+    BackendOptions ShapeInferenceMethodOption("ShapeInferenceMethod",
+    {
+        { "InferAndValidate", true }
+    });
+
+    INetworkPtr network = INetwork::Create({ShapeInferenceMethodOption});
+    TensorInfo tensorInfo({ 5, 7, 6, 2 }, DataType::Float32);
+
+    auto inputLayer = network->AddInputLayer(1, "inputLayer");
+    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    ActivationDescriptor descriptor;
+    descriptor.m_Function = ActivationFunction::Abs;
+    auto activationLayer = network->AddActivationLayer(descriptor, "activation");
+
+    inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
+    activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
+
+    BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+
+    BOOST_CHECK(activationLayer->GetOutputSlot(0).GetTensorInfo() == tensorInfo);
+
+
+    ShapeInferenceMethodOption = BackendOptions("ShapeInferenceMethod",
+                                               {
+                                                       { "InferAndValidate", false }
+                                               });
+
+    network = INetwork::Create({ShapeInferenceMethodOption});
+
+    inputLayer = network->AddInputLayer(1, "inputLayer");
+    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    activationLayer = network->AddActivationLayer(descriptor, "activation");
+
+    inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
+    activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
+
+    BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+
+    network = INetwork::Create();
+
+    inputLayer = network->AddInputLayer(1, "inputLayer");
+    inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    activationLayer = network->AddActivationLayer(descriptor, "activation");
+
+    inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
+    activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
+
+    BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+}
+
 BOOST_AUTO_TEST_CASE(AbsTest)
 {
     ActivationDescriptor descriptor;
@@ -190,7 +244,7 @@
 
     layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
 
-    layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly);
+    layer->ValidateTensorShapesFromInputs();
 
     BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
 }
@@ -564,15 +618,17 @@
    layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
 
    BOOST_CHECK_THROW(
-           layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly), LayerValidationException);
+           layer->ValidateTensorShapesFromInputs(), LayerValidationException);
 
-   layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::InferAndValidate);
+   layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
+
+   layer->ValidateTensorShapesFromInputs();
 
    BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
 
    layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::Scalar), DataType::Float32});
 
-   layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly);
+   layer->ValidateTensorShapesFromInputs();
 
    BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
 }