IVGCVSW-6929 Support for models with implicit expanded
 dimensions

 * Added allow-expanded-dims to TFLite parser and ArmNN delegate
   * If true ArmNN will disregard dimensions with a size of 1 when
     validating tensor shapes. Tensor sizes must still match.
   * This allows us to support models where tensors have expanded
     dimensions (i.e. extra dimensions with a size of 1).
 * Fixed bug in Network where it assumed that only the first option
   could be ShapeInferenceMethod.
 * Fixed bug where m_ShapeInferenceMethod was lost when copying or
   moving Graphs.
 * Changed Delegate to pass "infer-output-shape", "allow-expanded-dims"
   and other BackendOptions through to the Network during construction.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ibe7c5ae6597796fc9164cb07bd372bd7f8f8cacf
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 37fda3e..2194b48 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -107,18 +107,6 @@
 }
 
 //---------------------------------------------------------------
-void ValidateTensorNumDimensions(const TensorInfo& tensor,
-                                 std::string const& descName,
-                                 unsigned int numDimensions,
-                                 std::string const& tensorName)
-{
-    if (tensor.GetNumDimensions() != numDimensions)
-    {
-        throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
-            to_string(tensor.GetNumDimensions()) + " dimensions for " +
-            tensorName + " tensor.");
-    }
-}
 
 //---------------------------------------------------------------
 void ValidateTensorNumElements(const TensorInfo& tensor,
@@ -135,17 +123,6 @@
 }
 
 //---------------------------------------------------------------
-void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
-                                 unsigned int numDimension,
-                                 unsigned int numElements,
-                                 std::string const& tensorName)
-{
-    const std::string functionName{"ValidateTensorNumDimNumElem"};
-    ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
-    ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
-}
-
-//---------------------------------------------------------------
 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
     const std::string& descName, std::string const& tensorName)
 {
@@ -444,6 +421,56 @@
 
 } // anonymous namespace
 
+//---------------------------------------------------------------
+void QueueDescriptor::ValidateTensorNumDimensions(const TensorInfo& tensor,
+                                                  std::string const& descName,
+                                                  unsigned int numDimensions,
+                                                  std::string const& tensorName) const
+{
+    // If we're allowing expanded dimensions then numDimensions becomes the minimum number of dimensions we can allow.
+    // Throw an exception if the tensor has fewer than numDimensions, or if the number of squeezed dimensions is
+    // greater than numDimensions.
+    if (m_AllowExpandedDims)
+    {
+        unsigned int squeezedDims = 0;
+
+        for (unsigned int i = 0; i < tensor.GetNumDimensions(); ++i)
+        {
+            if (tensor.GetShape()[i] != 1)
+            {
+                ++squeezedDims;
+            }
+        }
+        if (tensor.GetNumDimensions() < numDimensions || squeezedDims > numDimensions)
+        {
+            throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " or less but got " +
+                                           to_string(tensor.GetNumDimensions()) + " dimensions for " +
+                                           tensorName + " tensor.");
+        }
+    }
+    else
+    {
+        if (tensor.GetNumDimensions() != numDimensions)
+        {
+            throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
+                                           to_string(tensor.GetNumDimensions()) + " dimensions for " +
+                                           tensorName + " tensor.");
+        }
+    }
+}
+
+//---------------------------------------------------------------
+void QueueDescriptor::ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
+                                 unsigned int numDimension,
+                                 unsigned int numElements,
+                                 std::string const& tensorName) const
+{
+    const std::string functionName{"ValidateTensorNumDimNumElem"};
+    ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
+    ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
+}
+
+//---------------------------------------------------------------
 void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
     unsigned int numExpectedIn, unsigned int numExpectedOut) const
 {
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index bd1b94e..1fe53de 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -276,7 +276,7 @@
 OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
                                                   const ModelOptions& modelOptions) const
 {
-    OptimizationViews optimizationViews;
+    OptimizationViews optimizationViews(modelOptions);
 
     auto it = subgraph.endIConnectable();
     bool isFastMathEnabled = false;
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 2433642..968bce4 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -139,9 +139,10 @@
     return layerSupport;
 }
 
-OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph) const
+OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
+                                                    const ModelOptions& modelOptions) const
 {
-    OptimizationViews optimizationViews;
+    OptimizationViews optimizationViews(modelOptions);
 
     auto it = subgraph.endIConnectable();
     std::map<LayerGuid, Layer*> untouched;
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index e3e3782..d407368 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -52,7 +52,8 @@
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const override;
 
-    OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
+    OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph,
+                                           const ModelOptions& modelOptions) const override;
 
     std::vector<ITensorHandleFactory::FactoryId> GetHandleFactoryPreferences() const override;