IVGCVSW-6929 Support for models with implicit expanded dimensions

 * Added an allow-expanded-dims option to the TfLite parser and the
   ArmNN delegate (see the usage sketch below).
   * If true, ArmNN will disregard dimensions with a size of 1 when
     validating tensor shapes; the total number of elements must
     still match.
   * This allows us to support models where tensors have expanded
     dimensions (i.e. extra dimensions with a size of 1).
 * Fixed a bug in Network where the ShapeInferenceMethod option was
   only found if it was the first network option.
 * Fixed a bug where m_ShapeInferenceMethod was lost when copying or
   moving Graphs.
 * Changed the Delegate to pass "infer-output-shape",
   "allow-expanded-dims", and other BackendOptions through to the
   Network during construction.
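
A minimal usage sketch of the two options. The BackendOptions names
match those parsed in Network.cpp; ITfLiteParser::Create and
armnn::Optional are assumed from the existing parser API:

    // Enabling both options when creating an INetwork directly.
    armnn::NetworkOptions networkOptions;
    networkOptions.push_back(armnn::BackendOptions("ShapeInferenceMethod",
                                                   {{"InferAndValidate", true}}));
    networkOptions.push_back(armnn::BackendOptions("AllowExpandedDims",
                                                   {{"AllowExpandedDims", true}}));
    armnn::INetworkPtr network = armnn::INetwork::Create(networkOptions);

    // Or through the TfLite parser options added in this change.
    armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions options;
    options.m_InferAndValidate  = true;
    options.m_AllowExpandedDims = true;
    auto parser = armnnTfLiteParser::ITfLiteParser::Create(
        armnn::Optional<armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions>(options));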

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ibe7c5ae6597796fc9164cb07bd372bd7f8f8cacf
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 8500e52..ae773cc 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -26,6 +26,8 @@
 
 Graph::Graph(const Graph& other)
 :   m_LayersInOrder(other.m_LayersInOrder)
+,   m_AllowExpandedDims(other.m_AllowExpandedDims)
+,   m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
 ,   m_Profiler(other.m_Profiler)
 {
     std::unordered_map<const Layer*, Layer*> otherToClonedMap;
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index 0c34d35..5edf34c 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -95,8 +95,9 @@
         const Graph& m_Graph;
     };
 
-    Graph(bool shapeInferenceMethod = false)
+    Graph(bool shapeInferenceMethod = false, bool allowExpandedDims = false)
         : m_LayersInOrder(true)
+        , m_AllowExpandedDims(allowExpandedDims)
         , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
                                                         ShapeInferenceMethod::ValidateOnly)
         , m_Profiler(std::make_shared<IProfiler>())
@@ -118,11 +119,12 @@
         m_LayersInOrder = std::move(other.m_LayersInOrder);
         m_Views         = std::move(other.m_Views);
         m_Profiler      = std::move(other.m_Profiler);
-
         other.ForEachLayer([this](Layer* otherLayer)
         {
             otherLayer->Reparent(*this, m_Layers.end());
         });
+        m_AllowExpandedDims    = other.m_AllowExpandedDims;
+        m_ShapeInferenceMethod = other.m_ShapeInferenceMethod;
 
         ARMNN_ASSERT(other.m_PosInGraphMap.empty());
         ARMNN_ASSERT(other.m_Layers.empty());
@@ -272,8 +274,11 @@
     mutable LayerList m_Layers;
     mutable bool m_LayersInOrder;
 
+    bool m_AllowExpandedDims;
+
     std::map<const GraphEvent, std::list<IGraphObservable*>> m_Views;
     ShapeInferenceMethod m_ShapeInferenceMethod;
+
     std::shared_ptr<IProfiler> m_Profiler;
 
     // Throws exception due to a layer input not being connected to an output slot.
@@ -424,6 +429,7 @@
     LayerT* const layer = new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
 
     layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
+    layer->SetAllowExpandedDims(m_AllowExpandedDims);
 
     NotifyObservables(GraphEvent::LayerAdded, layer);
 
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 3241b50..b1d4952 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -11,6 +11,8 @@
 
 #include <armnn/utility/NumericCast.hpp>
 
+#include <armnnUtils/TensorUtils.hpp>
+
 #include <client/include/IProfilingService.hpp>
 
 #include <fmt/format.h>
@@ -425,11 +427,41 @@
 {
     if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
     {
-        ConditionalThrowIfNotEqual<LayerValidationException>(
-                layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
-                outputShape,
-                inferredShape);
-        return;
+        if (m_AllowExpandedDims)
+        {
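+            // Compare the shapes with all size-1 dimensions removed; the remaining dimensions must match in order.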
+            std::vector<unsigned int> outputDims = armnnUtils::SqueezeDims(outputShape);
+            std::vector<unsigned int> inferredDims = armnnUtils::SqueezeDims(inferredShape);
+
+            if (outputDims.size() != inferredDims.size())
+            {
+                std::stringstream ss;
+                ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
+                   "] does not match the inferred shape. ";
+                ss << outputShape << " != " << inferredShape;
+                throw LayerValidationException(ss.str());
+            }
+            for (unsigned int i = 0; i < outputDims.size(); ++i)
+            {
+                if (outputDims[i] != inferredDims[i])
+                {
+                    std::stringstream ss;
+                    ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
+                       "] does not match the inferred shape at dimension index [";
+                    ss << i << "] " << outputShape << " != " << inferredShape;
+                    throw LayerValidationException(ss.str());
+                }
+            }
+            return;
+        }
+        else
+        {
+            ConditionalThrowIfNotEqual<LayerValidationException>(
+                    layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
+                    outputShape,
+                    inferredShape);
+            return;
+        }
     }
 
     if (outputShape.GetDimensionality() == Dimensionality::Specified)
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 114d69c..767cf97 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -238,6 +238,7 @@
     }
 
     ShapeInferenceMethod GetShapeInferenceMethod() const { return m_ShapeInferenceMethod; };
+    bool GetAllowExpandedDims() const { return m_AllowExpandedDims; };
 
     const std::vector<InputSlot>& GetInputSlots() const { return m_InputSlots; }
     const std::vector<OutputSlot>& GetOutputSlots() const { return m_OutputSlots; }
@@ -343,6 +344,11 @@
         m_ShapeInferenceMethod = shapeInferenceMethod;
     }
 
+    void SetAllowExpandedDims(bool allowExpandedDims)
+    {
+        m_AllowExpandedDims = allowExpandedDims;
+    }
+
     template<typename T>
     std::shared_ptr<T> GetAdditionalInformation() const
     {
@@ -428,6 +434,8 @@
     mutable LayerPriority m_Priority = 0;
     mutable bool m_Visiting = false;
 
+    bool m_AllowExpandedDims = false;
+
     LayerGuid m_Guid;
 
     std::list<std::string> m_RelatedLayerNames;
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 77ad5c4..6a646d3 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1854,16 +1854,37 @@
 
 bool NetworkImpl::GetShapeInferenceMethod()
 {
-    if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
-    {
-        return m_NetworkOptions[0].GetOption(0).GetValue().AsBool();
-    }
+    bool shapeInferenceMethod = false;
 
-    return false;
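+    // The option is grouped under the BackendOptions id "ShapeInferenceMethod" but is named "InferAndValidate".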
+    ParseOptions(m_NetworkOptions, "ShapeInferenceMethod", [&](std::string name, const BackendOptions::Var& value)
+    {
+        if (name == "InferAndValidate")
+        {
+            shapeInferenceMethod |= value.AsBool();
+        }
+    });
+    return shapeInferenceMethod;
 }
+
+bool NetworkImpl::GetAllowExpandedDims()
+{
+    bool allowExpandedDims = false;
+
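+    // Both the BackendOptions id and the option itself are named "AllowExpandedDims".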
+    ParseOptions(m_NetworkOptions, "AllowExpandedDims", [&](std::string name, const BackendOptions::Var& value)
+    {
+        if (name == "AllowExpandedDims")
+        {
+            allowExpandedDims |= value.AsBool();
+        }
+    });
+    return allowExpandedDims;
+}
+
 NetworkImpl::NetworkImpl(NetworkOptions networkOptions)
 : m_NetworkOptions(networkOptions),
-  m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
+  m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod(), GetAllowExpandedDims()))
 {}
 
 NetworkImpl::~NetworkImpl()
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index c2be600..6c7c2f5 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -262,6 +262,7 @@
 private:
 
     bool GetShapeInferenceMethod();
+    bool GetAllowExpandedDims();
     NetworkOptions m_NetworkOptions;
 
     std::unique_ptr<Graph> m_Graph;
diff --git a/src/armnn/layers/LayerCloneBase.hpp b/src/armnn/layers/LayerCloneBase.hpp
index 348b1f3..54b64c5 100644
--- a/src/armnn/layers/LayerCloneBase.hpp
+++ b/src/armnn/layers/LayerCloneBase.hpp
@@ -19,6 +19,7 @@
     layer->SetBackendId(GetBackendId());
     layer->SetGuid(GetGuid());
     layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
+    layer->SetAllowExpandedDims(m_AllowExpandedDims);
 
     return layer;
 }
diff --git a/src/armnn/layers/LayerWithParameters.hpp b/src/armnn/layers/LayerWithParameters.hpp
index 2ac16c5..8d9ddff 100644
--- a/src/armnn/layers/LayerWithParameters.hpp
+++ b/src/armnn/layers/LayerWithParameters.hpp
@@ -43,6 +43,8 @@
     WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor) const
     {
         descriptor.m_Parameters = m_Param;
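+        // Propagate this layer's allow-expanded-dims setting so shape validation can honour it.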
+        descriptor.m_AllowExpandedDims = GetAllowExpandedDims();
         return Layer::PrepInfoAndDesc(descriptor);
     }
 
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index aa07f7b..49f1f9f 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -793,16 +793,27 @@
 
     using NetworkOptions = std::vector<BackendOptions>;
     NetworkOptions networkOptions = {};
-    if (m_Options && m_Options.value().m_InferAndValidate)
+    if (m_Options)
     {
-        BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
-                                                  {
-                                                      { "InferAndValidate", true }
-                                                  });
+        if (m_Options.value().m_InferAndValidate)
+        {
+            BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
+                                                      {
+                                                          { "InferAndValidate", true }
+                                                      });
 
-        networkOptions.push_back(shapeInferenceMethodOption);
+            networkOptions.push_back(shapeInferenceMethodOption);
+        }
+        if (m_Options.value().m_AllowExpandedDims)
+        {
+            BackendOptions allowExpandedDimsOption("AllowExpandedDims",
+                                                      {
+                                                          { "AllowExpandedDims", true }
+                                                      });
+
+            networkOptions.push_back(allowExpandedDimsOption);
+        }
     }
-
     m_Network = INetwork::Create(networkOptions);
     ARMNN_ASSERT(m_Model.get() != nullptr);
 
diff --git a/src/armnnUtils/TensorUtils.cpp b/src/armnnUtils/TensorUtils.cpp
index 5b5b2bd..d77f5d7 100644
--- a/src/armnnUtils/TensorUtils.cpp
+++ b/src/armnnUtils/TensorUtils.cpp
@@ -131,6 +131,21 @@
     return TensorShape(outputDim, outputShape.data());
 }
 
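+// Returns a copy of the shape's dimensions with all dimensions of size 1 removed.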
+std::vector<unsigned int> SqueezeDims(const TensorShape& tensorShape)
+{
+    std::vector<unsigned int> squeezedDims;
+
+    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
+    {
+        if (tensorShape[i] != 1)
+        {
+            squeezedDims.push_back(tensorShape[i]);
+        }
+    }
+    return squeezedDims;
+}
+
 unsigned int GetNumElementsBetween(const TensorShape& shape,
                                    const unsigned int firstAxisInclusive,
                                    const unsigned int lastAxisExclusive)
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 37fda3e..2194b48 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -107,18 +107,6 @@
 }
 
 //---------------------------------------------------------------
-void ValidateTensorNumDimensions(const TensorInfo& tensor,
-                                 std::string const& descName,
-                                 unsigned int numDimensions,
-                                 std::string const& tensorName)
-{
-    if (tensor.GetNumDimensions() != numDimensions)
-    {
-        throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
-            to_string(tensor.GetNumDimensions()) + " dimensions for " +
-            tensorName + " tensor.");
-    }
-}
 
 //---------------------------------------------------------------
 void ValidateTensorNumElements(const TensorInfo& tensor,
@@ -135,17 +123,6 @@
 }
 
 //---------------------------------------------------------------
-void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
-                                 unsigned int numDimension,
-                                 unsigned int numElements,
-                                 std::string const& tensorName)
-{
-    const std::string functionName{"ValidateTensorNumDimNumElem"};
-    ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
-    ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
-}
-
-//---------------------------------------------------------------
 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
     const std::string& descName, std::string const& tensorName)
 {
@@ -444,6 +421,56 @@
 
 } // anonymous namespace
 
+//---------------------------------------------------------------
+void QueueDescriptor::ValidateTensorNumDimensions(const TensorInfo& tensor,
+                                                  std::string const& descName,
+                                                  unsigned int numDimensions,
+                                                  std::string const& tensorName) const
+{
+    // If we're allowing expanded dimensions then numDimensions becomes the minimum number of dimensions we allow.
+    // Throw an exception if the tensor has fewer than numDimensions dimensions or if the number of dimensions
+    // remaining after squeezing out the size-1 dimensions is greater than numDimensions.
+    if (m_AllowExpandedDims)
+    {
+        unsigned int squeezedDims = 0;
+
+        for (unsigned int i = 0; i < tensor.GetNumDimensions(); ++i)
+        {
+            if (tensor.GetShape()[i] != 1)
+            {
+                ++squeezedDims;
+            }
+        }
+        if (tensor.GetNumDimensions() < numDimensions || squeezedDims > numDimensions)
+        {
+            throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " or less but got " +
+                                           to_string(tensor.GetNumDimensions()) + " dimensions for " +
+                                           tensorName + " tensor.");
+        }
+    }
+    else
+    {
+        if (tensor.GetNumDimensions() != numDimensions)
+        {
+            throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
+                                           to_string(tensor.GetNumDimensions()) + " dimensions for " +
+                                           tensorName + " tensor.");
+        }
+    }
+}
+
+//---------------------------------------------------------------
+void QueueDescriptor::ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
+                                                  unsigned int numDimension,
+                                                  unsigned int numElements,
+                                                  std::string const& tensorName) const
+{
+    const std::string functionName{"ValidateTensorNumDimNumElem"};
+    ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
+    ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
+}
+
+//---------------------------------------------------------------
 void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
     unsigned int numExpectedIn, unsigned int numExpectedOut) const
 {
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index bd1b94e..1fe53de 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -276,7 +276,7 @@
 OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
                                                   const ModelOptions& modelOptions) const
 {
-    OptimizationViews optimizationViews;
+    OptimizationViews optimizationViews(modelOptions);
 
     auto it = subgraph.endIConnectable();
     bool isFastMathEnabled = false;
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 2433642..968bce4 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -139,9 +139,10 @@
     return layerSupport;
 }
 
-OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph) const
+OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
+                                                    const ModelOptions& modelOptions) const
 {
-    OptimizationViews optimizationViews;
+    OptimizationViews optimizationViews(modelOptions);
 
     auto it = subgraph.endIConnectable();
     std::map<LayerGuid, Layer*> untouched;
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index e3e3782..d407368 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -52,7 +52,8 @@
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
 
-    OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
+    OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph,
+                                           const ModelOptions& modelOptions) const override;
 
     std::vector<ITensorHandleFactory::FactoryId> GetHandleFactoryPreferences() const override;