IVGCVSW-6929 Support for models with implicit expanded dimensions

 * Added allow-expanded-dims to TFLite parser and ArmNN delegate
   * If true, ArmNN will disregard dimensions with a size of 1 when
     validating tensor shapes. Tensor sizes must still match.
   * This allows us to support models where tensors have expanded
     dimensions (i.e. extra dimensions with a size of 1). A usage
     sketch follows this list.
 * Fixed a bug in Network where it assumed that only the first option
   could be ShapeInferenceMethod.
 * Fixed a bug where m_ShapeInferenceMethod was lost when copying or
   moving Graphs.
 * Changed Delegate to pass "infer-output-shape", "allow-expanded-dims",
   and other BackendOptions through to the Network during construction.
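
 The snippet below is a minimal sketch (illustrative only, not part of
 this change) of how a caller can enable the new behaviour
 programmatically. The option and value names match the BackendOptions
 added by this patch; the surrounding setup is assumed boilerplate.

     #include <armnn/BackendOptions.hpp>
     #include <armnn/INetwork.hpp>

     // NetworkOptions carrying the experimental flag. NetworkImpl reads
     // "AllowExpandedDims" during construction and forwards it to the
     // Graph, which in turn sets it on every layer it creates.
     armnn::NetworkOptions networkOptions;
     networkOptions.push_back(armnn::BackendOptions("AllowExpandedDims",
                                                    {
                                                        { "AllowExpandedDims", true }
                                                    }));

     // "ShapeInferenceMethod"/"InferAndValidate" is incompatible with the
     // flag above, so it is deliberately not added here.
     armnn::INetworkPtr network = armnn::INetwork::Create(networkOptions);

 The same behaviour can be requested from ExecuteNetwork via the new
 --allow-expanded-dims command line option.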

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ibe7c5ae6597796fc9164cb07bd372bd7f8f8cacf
diff --git a/delegate/include/DelegateOptions.hpp b/delegate/include/DelegateOptions.hpp
index 7f7eaa5..d789ea7 100644
--- a/delegate/include/DelegateOptions.hpp
+++ b/delegate/include/DelegateOptions.hpp
@@ -163,6 +163,17 @@
      *    Possible values: [filenameString] \n
      *    Description: Serialize the optimized network to the file specified in "dot" format.
      *
+     *    Option key: "infer-output-shape" \n
+     *    Possible values: ["true"/"false"] \n
+     *    Description: Infers output tensor shape from input tensor shape and validates where applicable.
+     *
+     *    Option key: "allow-expanded-dims" \n
+     *    Possible values: ["true"/"false"] \n
+     *    Description: If true, will disregard dimensions with a size of 1 when validating tensor shapes. Tensor
+     *                 sizes must still match. \n
+     *                 This is an Experimental parameter that is incompatible with "infer-output-shape". \n
+     *                 This parameter may be removed in a later update.
+     *
      * @param[in]     option_keys     Delegate option names
      * @param[in]     options_values  Delegate option values
      * @param[in]     num_options     Number of delegate options
diff --git a/delegate/src/DelegateOptions.cpp b/delegate/src/DelegateOptions.cpp
index 9413a46..f3e13c9 100644
--- a/delegate/src/DelegateOptions.cpp
+++ b/delegate/src/DelegateOptions.cpp
@@ -156,6 +156,24 @@
         {
             optimizerOptions.m_Debug = armnn::stringUtils::StringToBool(options_values[i]);
         }
+            // Infer output-shape
+        else if (std::string(options_keys[i]) == std::string("infer-output-shape"))
+        {
+            armnn::BackendOptions backendOption("ShapeInferenceMethod",
+            {
+                { "InferAndValidate", armnn::stringUtils::StringToBool(options_values[i]) }
+            });
+            optimizerOptions.m_ModelOptions.push_back(backendOption);
+        }
+            // Allow expanded dims
+        else if (std::string(options_keys[i]) == std::string("allow-expanded-dims"))
+        {
+            armnn::BackendOptions backendOption("AllowExpandedDims",
+            {
+                { "AllowExpandedDims", armnn::stringUtils::StringToBool(options_values[i]) }
+            });
+            optimizerOptions.m_ModelOptions.push_back(backendOption);
+        }
             // Process memory-import
         else if (std::string(options_keys[i]) == std::string("memory-import"))
         {
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index 4d71f26..6e1a91f 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -308,7 +308,7 @@
     DelegateData delegateData(delegate->m_Options.GetBackends());
 
     // Build ArmNN Network
-    armnn::NetworkOptions networkOptions = {};
+    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().m_ModelOptions;
     armnn::NetworkId networkId;
     delegateData.m_Network = armnn::INetwork::Create(networkOptions);
 
diff --git a/include/armnn/backends/OptimizationViews.hpp b/include/armnn/backends/OptimizationViews.hpp
index f7346de..29ee68c 100644
--- a/include/armnn/backends/OptimizationViews.hpp
+++ b/include/armnn/backends/OptimizationViews.hpp
@@ -13,7 +13,7 @@
 class OptimizationViews
 {
 public:
-    OptimizationViews() = default;
+    OptimizationViews(NetworkOptions networkOptions = {}) : m_INetwork(INetwork::Create(networkOptions)) {}
     OptimizationViews(const OptimizationViews&) = delete;
     OptimizationViews& operator=(const OptimizationViews&) = delete;
     OptimizationViews(OptimizationViews&&) = default;
diff --git a/include/armnn/backends/WorkloadData.hpp b/include/armnn/backends/WorkloadData.hpp
index ed89f96..1a2f34e 100644
--- a/include/armnn/backends/WorkloadData.hpp
+++ b/include/armnn/backends/WorkloadData.hpp
@@ -29,6 +29,16 @@
 
     virtual ~QueueDescriptor() = default;
 
+    void ValidateTensorNumDimensions(const TensorInfo& tensor,
+                                     std::string const& descName,
+                                     unsigned int numDimensions,
+                                     std::string const& tensorName) const;
+
+    void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
+                                     unsigned int numDimension,
+                                     unsigned int numElements,
+                                     std::string const& tensorName) const;
+
     void ValidateInputsOutputs(const std::string& descName,
                                unsigned int numExpectedIn,
                                unsigned int numExpectedOut) const;
@@ -39,6 +49,8 @@
         return static_cast<T*>(m_AdditionalInfoObject);
     }
 
+    bool m_AllowExpandedDims = false;
+
 protected:
     QueueDescriptor()
         : m_AdditionalInfoObject(nullptr)
diff --git a/include/armnnTfLiteParser/ITfLiteParser.hpp b/include/armnnTfLiteParser/ITfLiteParser.hpp
index b286c1e..ea6e87a 100644
--- a/include/armnnTfLiteParser/ITfLiteParser.hpp
+++ b/include/armnnTfLiteParser/ITfLiteParser.hpp
@@ -29,9 +29,11 @@
     struct TfLiteParserOptions
     {
         TfLiteParserOptions()
-            : m_StandInLayerForUnsupported(false),
+            : m_AllowExpandedDims(false),
+              m_StandInLayerForUnsupported(false),
               m_InferAndValidate(false) {}
 
+        bool m_AllowExpandedDims;
         bool m_StandInLayerForUnsupported;
         bool m_InferAndValidate;
     };
diff --git a/include/armnnUtils/TensorUtils.hpp b/include/armnnUtils/TensorUtils.hpp
index 6a97525..fc2f510 100644
--- a/include/armnnUtils/TensorUtils.hpp
+++ b/include/armnnUtils/TensorUtils.hpp
@@ -34,6 +34,8 @@
 
 armnn::TensorShape ExpandDims(const armnn::TensorShape& tensorShape, int axis);
 
+std::vector<unsigned int> SqueezeDims(const armnn::TensorShape& tensorShape);
+
 unsigned int GetNumElementsBetween(const armnn::TensorShape& shape,
                                    unsigned int firstAxisInclusive,
                                    unsigned int lastAxisExclusive);
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 8500e52..ae773cc 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -26,6 +26,8 @@
 
 Graph::Graph(const Graph& other)
 :   m_LayersInOrder(other.m_LayersInOrder)
+,   m_AllowExpandedDims(other.m_AllowExpandedDims)
+,   m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
 ,   m_Profiler(other.m_Profiler)
 {
     std::unordered_map<const Layer*, Layer*> otherToClonedMap;
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index 0c34d35..5edf34c 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -95,8 +95,9 @@
         const Graph& m_Graph;
     };
 
-    Graph(bool shapeInferenceMethod = false)
+    Graph(bool shapeInferenceMethod = false, bool allowExpandedDims = false)
         : m_LayersInOrder(true)
+        , m_AllowExpandedDims(allowExpandedDims)
         , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
                                                         ShapeInferenceMethod::ValidateOnly)
         , m_Profiler(std::make_shared<IProfiler>())
@@ -118,11 +119,12 @@
         m_LayersInOrder = std::move(other.m_LayersInOrder);
         m_Views         = std::move(other.m_Views);
         m_Profiler      = std::move(other.m_Profiler);
-
         other.ForEachLayer([this](Layer* otherLayer)
         {
             otherLayer->Reparent(*this, m_Layers.end());
         });
+        m_AllowExpandedDims    = other.m_AllowExpandedDims;
+        m_ShapeInferenceMethod = other.m_ShapeInferenceMethod;
 
         ARMNN_ASSERT(other.m_PosInGraphMap.empty());
         ARMNN_ASSERT(other.m_Layers.empty());
@@ -272,8 +274,11 @@
     mutable LayerList m_Layers;
     mutable bool m_LayersInOrder;
 
+    bool m_AllowExpandedDims;
+
     std::map<const GraphEvent, std::list<IGraphObservable*>> m_Views;
     ShapeInferenceMethod m_ShapeInferenceMethod;
+
     std::shared_ptr<IProfiler> m_Profiler;
 
     // Throws exception due to a layer input not being connected to an output slot.
@@ -424,6 +429,7 @@
     LayerT* const layer = new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
 
     layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
+    layer->SetAllowExpandedDims(m_AllowExpandedDims);
 
     NotifyObservables(GraphEvent::LayerAdded, layer);
 
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 3241b50..b1d4952 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -11,6 +11,8 @@
 
 #include <armnn/utility/NumericCast.hpp>
 
+#include <armnnUtils/TensorUtils.hpp>
+
 #include <client/include/IProfilingService.hpp>
 
 #include <fmt/format.h>
@@ -425,11 +427,40 @@
 {
     if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
     {
-        ConditionalThrowIfNotEqual<LayerValidationException>(
-                layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
-                outputShape,
-                inferredShape);
-        return;
+        if (m_AllowExpandedDims)
+        {
+            std::vector<unsigned int> outputDims = armnnUtils::SqueezeDims(outputShape);
+            std::vector<unsigned int> inferredDims = armnnUtils::SqueezeDims(inferredShape);
+
+            if (outputDims.size() != inferredDims.size())
+            {
+                std::stringstream ss;
+                ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
+                   "] does not match the inferred shape. ";
+                ss << outputShape << " != " << inferredShape;
+                throw LayerValidationException(ss.str());
+            }
+            for (unsigned int i = 0; i < outputDims.size(); ++i)
+            {
+                if (outputDims[i] != inferredDims[i])
+                {
+                    std::stringstream ss;
+                    ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
+                       "] does not match the inferred shape at dimension index [";
+                    ss << i << "] " << outputShape << " != " << inferredShape;
+                    throw LayerValidationException(ss.str());
+                }
+            }
+            return;
+        }
+        else
+        {
+            ConditionalThrowIfNotEqual<LayerValidationException>(
+                    layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
+                    outputShape,
+                    inferredShape);
+            return;
+        }
     }
 
     if (outputShape.GetDimensionality() == Dimensionality::Specified)
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 114d69c..767cf97 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -238,6 +238,7 @@
     }
 
     ShapeInferenceMethod GetShapeInferenceMethod() const { return m_ShapeInferenceMethod; };
+    bool GetAllowExpandedDims() const { return m_AllowExpandedDims; };
 
     const std::vector<InputSlot>& GetInputSlots() const { return m_InputSlots; }
     const std::vector<OutputSlot>& GetOutputSlots() const { return m_OutputSlots; }
@@ -343,6 +344,11 @@
         m_ShapeInferenceMethod = shapeInferenceMethod;
     }
 
+    void SetAllowExpandedDims(bool allowExpandedDims)
+    {
+        m_AllowExpandedDims = allowExpandedDims;
+    }
+
     template<typename T>
     std::shared_ptr<T> GetAdditionalInformation() const
     {
@@ -428,6 +434,8 @@
     mutable LayerPriority m_Priority = 0;
     mutable bool m_Visiting = false;
 
+    bool m_AllowExpandedDims = false;
+
     LayerGuid m_Guid;
 
     std::list<std::string> m_RelatedLayerNames;
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 77ad5c4..6a646d3 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1854,16 +1854,35 @@
 
 bool NetworkImpl::GetShapeInferenceMethod()
 {
-    if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
-    {
-        return m_NetworkOptions[0].GetOption(0).GetValue().AsBool();
-    }
+    bool shapeInferenceMethod = false;
 
-    return false;
+    ParseOptions(m_NetworkOptions, "ShapeInferenceMethod", [&](std::string name, const BackendOptions::Var& value)
+    {
+        if (name == "InferAndValidate")
+        {
+            shapeInferenceMethod |= value.AsBool();
+        }
+    });
+    return shapeInferenceMethod;
 }
+
+bool NetworkImpl::GetAllowExpandedDims()
+{
+    bool allowExpandedDims = false;
+
+    ParseOptions(m_NetworkOptions, "AllowExpandedDims", [&](std::string name, const BackendOptions::Var& value)
+    {
+        if (name == "AllowExpandedDims")
+        {
+            allowExpandedDims |= value.AsBool();
+        }
+    });
+    return allowExpandedDims;
+}
+
 NetworkImpl::NetworkImpl(NetworkOptions networkOptions)
 : m_NetworkOptions(networkOptions),
-  m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
+  m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod(), GetAllowExpandedDims()))
 {}
 
 NetworkImpl::~NetworkImpl()
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index c2be600..6c7c2f5 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -262,6 +262,7 @@
 private:
 
     bool GetShapeInferenceMethod();
+    bool GetAllowExpandedDims();
     NetworkOptions m_NetworkOptions;
 
     std::unique_ptr<Graph> m_Graph;
diff --git a/src/armnn/layers/LayerCloneBase.hpp b/src/armnn/layers/LayerCloneBase.hpp
index 348b1f3..54b64c5 100644
--- a/src/armnn/layers/LayerCloneBase.hpp
+++ b/src/armnn/layers/LayerCloneBase.hpp
@@ -19,6 +19,7 @@
     layer->SetBackendId(GetBackendId());
     layer->SetGuid(GetGuid());
     layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
+    layer->SetAllowExpandedDims(m_AllowExpandedDims);
 
     return layer;
 }
diff --git a/src/armnn/layers/LayerWithParameters.hpp b/src/armnn/layers/LayerWithParameters.hpp
index 2ac16c5..8d9ddff 100644
--- a/src/armnn/layers/LayerWithParameters.hpp
+++ b/src/armnn/layers/LayerWithParameters.hpp
@@ -43,6 +43,7 @@
     WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor) const
     {
         descriptor.m_Parameters = m_Param;
+        descriptor.m_AllowExpandedDims = GetAllowExpandedDims();
         return Layer::PrepInfoAndDesc(descriptor);
     }
 
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index aa07f7b..49f1f9f 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -793,16 +793,27 @@
 
     using NetworkOptions = std::vector<BackendOptions>;
     NetworkOptions networkOptions = {};
-    if (m_Options && m_Options.value().m_InferAndValidate)
+    if (m_Options)
     {
-        BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
-                                                  {
-                                                      { "InferAndValidate", true }
-                                                  });
+        if (m_Options.value().m_InferAndValidate)
+        {
+            BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
+                                                      {
+                                                          { "InferAndValidate", true }
+                                                      });
 
-        networkOptions.push_back(shapeInferenceMethodOption);
+            networkOptions.push_back(shapeInferenceMethodOption);
+        }
+        if (m_Options.value().m_AllowExpandedDims)
+        {
+            BackendOptions shapeInferenceMethodOption("AllowExpandedDims",
+                                                      {
+                                                          { "AllowExpandedDims", true }
+                                                      });
+
+            networkOptions.push_back(shapeInferenceMethodOption);
+        }
     }
-
     m_Network = INetwork::Create(networkOptions);
     ARMNN_ASSERT(m_Model.get() != nullptr);
 
diff --git a/src/armnnUtils/TensorUtils.cpp b/src/armnnUtils/TensorUtils.cpp
index 5b5b2bd..d77f5d7 100644
--- a/src/armnnUtils/TensorUtils.cpp
+++ b/src/armnnUtils/TensorUtils.cpp
@@ -131,6 +131,22 @@
     return TensorShape(outputDim, outputShape.data());
 }
 
+std::vector<unsigned int> SqueezeDims(const TensorShape& tensorShape)
+{
+    unsigned int outputDimSize = 0;
+    std::vector<unsigned int> squeezedDims;
+
+    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
+    {
+        if (tensorShape[i] != 1)
+        {
+            squeezedDims.push_back(tensorShape[i]);
+            ++outputDimSize;
+        }
+    }
+    return squeezedDims;
+}
+
 unsigned int GetNumElementsBetween(const TensorShape& shape,
                                    const unsigned int firstAxisInclusive,
                                    const unsigned int lastAxisExclusive)
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 37fda3e..2194b48 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -107,18 +107,6 @@
 }
 
 //---------------------------------------------------------------
-void ValidateTensorNumDimensions(const TensorInfo& tensor,
-                                 std::string const& descName,
-                                 unsigned int numDimensions,
-                                 std::string const& tensorName)
-{
-    if (tensor.GetNumDimensions() != numDimensions)
-    {
-        throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
-            to_string(tensor.GetNumDimensions()) + " dimensions for " +
-            tensorName + " tensor.");
-    }
-}
 
 //---------------------------------------------------------------
 void ValidateTensorNumElements(const TensorInfo& tensor,
@@ -135,17 +123,6 @@
 }
 
 //---------------------------------------------------------------
-void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
-                                 unsigned int numDimension,
-                                 unsigned int numElements,
-                                 std::string const& tensorName)
-{
-    const std::string functionName{"ValidateTensorNumDimNumElem"};
-    ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
-    ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
-}
-
-//---------------------------------------------------------------
 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
     const std::string& descName, std::string const& tensorName)
 {
@@ -444,6 +421,56 @@
 
 } // anonymous namespace
 
+//---------------------------------------------------------------
+void QueueDescriptor::ValidateTensorNumDimensions(const TensorInfo& tensor,
+                                                  std::string const& descName,
+                                                  unsigned int numDimensions,
+                                                  std::string const& tensorName) const
+{
+    // If we're allowing expanded dimensions then numDimensions becomes the minimum number of dimensions we can allow.
+    // Throw an exception if the tensor has fewer than numDimensions or if the number of squeezed dimensions is
+    // greater than numDimensions.
+    if (m_AllowExpandedDims)
+    {
+        unsigned int squeezedDims = 0;
+
+        for (unsigned int i = 0; i < tensor.GetNumDimensions(); ++i)
+        {
+            if (tensor.GetShape()[i] != 1)
+            {
+                ++squeezedDims;
+            }
+        }
+        if (tensor.GetNumDimensions() < numDimensions || squeezedDims > numDimensions)
+        {
+            throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " or less but got " +
+                                           to_string(tensor.GetNumDimensions()) + " dimensions for " +
+                                           tensorName + " tensor.");
+        }
+    }
+    else
+    {
+        if (tensor.GetNumDimensions() != numDimensions)
+        {
+            throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
+                                           to_string(tensor.GetNumDimensions()) + " dimensions for " +
+                                           tensorName + " tensor.");
+        }
+    }
+}
+
+//---------------------------------------------------------------
+void QueueDescriptor::ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
+                                                   unsigned int numDimension,
+                                                   unsigned int numElements,
+                                                   std::string const& tensorName) const
+{
+    const std::string functionName{"ValidateTensorNumDimNumElem"};
+    ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
+    ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
+}
+
+//---------------------------------------------------------------
 void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
     unsigned int numExpectedIn, unsigned int numExpectedOut) const
 {
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index bd1b94e..1fe53de 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -276,7 +276,7 @@
 OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
                                                   const ModelOptions& modelOptions) const
 {
-    OptimizationViews optimizationViews;
+    OptimizationViews optimizationViews(modelOptions);
 
     auto it = subgraph.endIConnectable();
     bool isFastMathEnabled = false;
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 2433642..968bce4 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -139,9 +139,10 @@
     return layerSupport;
 }
 
-OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph) const
+OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
+                                                    const ModelOptions& modelOptions) const
 {
-    OptimizationViews optimizationViews;
+    OptimizationViews optimizationViews(modelOptions);
 
     auto it = subgraph.endIConnectable();
     std::map<LayerGuid, Layer*> untouched;
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index e3e3782..d407368 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -52,7 +52,8 @@
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
 
-    OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
+    OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph,
+                                           const ModelOptions& modelOptions) const override;
 
     std::vector<ITensorHandleFactory::FactoryId> GetHandleFactoryPreferences() const override;
 
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index ddabf3c..f0a3d08 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -389,6 +389,7 @@
         // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
         typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
         inferenceModelParams.m_ModelPath                      = params.m_ModelPath;
+        inferenceModelParams.m_AllowExpandedDims              = params.m_AllowExpandedDims;
         inferenceModelParams.m_IsModelBinary                  = params.m_IsModelBinary;
         inferenceModelParams.m_ComputeDevices                 = params.m_ComputeDevices;
         inferenceModelParams.m_DynamicBackendsPath            = params.m_DynamicBackendsPath;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index b3d18cd..cc75bb4 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -232,6 +232,11 @@
     {
         ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
     }
+
+    if (m_AllowExpandedDims && m_InferOutputShape)
+    {
+        throw armnn::InvalidArgumentException("infer-output-shape and allow-expanded-dims cannot be used together.");
+    }
 }
 
 #if defined(ARMNN_TFLITE_DELEGATE)
@@ -277,6 +282,22 @@
     options.m_ModelOptions.push_back(gpuAcc);
     options.m_ModelOptions.push_back(cpuAcc);
 
+    if (m_InferOutputShape)
+    {
+        armnn::BackendOptions networkOption("ShapeInferenceMethod",
+                                            {
+                                                    {"InferAndValidate", true}
+                                            });
+        options.m_ModelOptions.push_back(networkOption);
+    }
+    if (m_AllowExpandedDims)
+    {
+        armnn::BackendOptions networkOption("AllowExpandedDims",
+                                            {
+                                                    {"AllowExpandedDims", true}
+                                            });
+        options.m_ModelOptions.push_back(networkOption);
+    }
     delegateOptions.SetOptimizerOptions(options);
 
     // If v,visualize-optimized-model is enabled then construct a file name for the dot file.
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index 04a0733..5ef2b6e 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -25,6 +25,7 @@
         TfliteInterpreter
     };
 
+    bool                          m_AllowExpandedDims;
     std::string                   m_CachedNetworkFilePath;
     std::vector<armnn::BackendId> m_ComputeDevices;
     bool                          m_Concurrent;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index c84c79e..ad35092 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -228,6 +228,13 @@
                  "parser)",
                  cxxopts::value<bool>(m_ExNetParams.m_InferOutputShape)->default_value("false")->implicit_value("true"))
 
+                ("allow-expanded-dims",
+                 "If true, will disregard dimensions with a size of 1 when validating tensor shapes. Tensor sizes must "
+                 "still match. This is an Experimental parameter that is incompatible with infer-output-shape. "
+                 "This parameter may be removed in a later update.",
+                 cxxopts::value<bool>(m_ExNetParams.m_AllowExpandedDims)->default_value("false")
+                 ->implicit_value("true"))
+
                 ("iterations",
                  "Number of iterations to run the network for, default is set to 1. "
                  "If you wish to run the model with different input data for every execution you can do so by "
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index e2a1a97..93716e1 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -95,6 +95,7 @@
     std::vector<armnn::BackendId>   m_ComputeDevices;
     std::string                     m_DynamicBackendsPath;
     size_t                          m_SubgraphId;
+    bool                            m_AllowExpandedDims;
     bool                            m_IsModelBinary;
     bool                            m_VisualizePostOptimizationModel;
     bool                            m_EnableFp16TurboMode;
@@ -117,6 +118,7 @@
     Params()
         : m_ComputeDevices{}
         , m_SubgraphId(0)
+        , m_AllowExpandedDims(false)
         , m_IsModelBinary(true)
         , m_VisualizePostOptimizationModel(false)
         , m_EnableFp16TurboMode(false)
@@ -268,6 +270,7 @@
 
         // Create a network from a file on disk
         IParser::TfLiteParserOptions options;
+        options.m_AllowExpandedDims          = params.m_AllowExpandedDims;
         options.m_StandInLayerForUnsupported = params.m_ParseUnsupported;
         options.m_InferAndValidate           = params.m_InferOutputShape;
         auto parser(IParser::Create(options));