IVGCVSW-7267 Make the AllowExpandedDims option work

Signed-off-by: Jim Flynn <jim.flynn@arm.com>
Change-Id: I3573078206272c3a72a2b3acf8781ab458ea6c90
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index fefb2eb..0289a90 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -135,6 +135,7 @@
         , m_ModelOptions()
         , m_ProfilingEnabled(false)
         , m_ExportEnabled(false)
+        , m_AllowExpandedDims(false)
     {}
 
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
@@ -147,6 +148,7 @@
         , m_ModelOptions(modelOptions)
         , m_ProfilingEnabled(false)
         , m_ExportEnabled(exportEnabled)
+        , m_AllowExpandedDims(false)
     {
         if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
         {
@@ -156,7 +158,8 @@
 
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
                      ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
-                     bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false)
+                     bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false,
+                     bool allowExpandedDims = false)
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
@@ -165,6 +168,7 @@
         , m_ModelOptions(modelOptions)
         , m_ProfilingEnabled(false)
         , m_ExportEnabled(exportEnabled)
+        , m_AllowExpandedDims(allowExpandedDims)
     {
         if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
         {
@@ -184,6 +188,7 @@
         stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
         stream << "\tExportEnabled: " << m_ExportEnabled << "\n";
         stream << "\tProfilingEnabled: " << m_ProfilingEnabled << "\n";
+        stream << "\tAllowExpandedDims: " << m_AllowExpandedDims << "\n";
 
         stream << "\tModelOptions: \n";
         for (auto optionsGroup : m_ModelOptions)
@@ -231,6 +236,9 @@
 
     // Enable Export
     bool m_ExportEnabled;
+
+    // When calculating tensor sizes, dimensions of size == 1 will be ignored.
+    bool m_AllowExpandedDims;
 };
 
 class IWorkloadFactory;
@@ -246,8 +254,8 @@
 class INetwork
 {
 public:
-    static INetwork* CreateRaw(NetworkOptions networkOptions = {});
-    static INetworkPtr Create(NetworkOptions networkOptions = {});
+    static INetwork* CreateRaw(const NetworkOptions& networkOptions = {});
+    static INetworkPtr Create(const NetworkOptions& networkOptions = {});
     static void Destroy(INetwork* network);
 
     Status PrintGraph();
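
With the new member and constructor parameter in place, callers opt in through OptimizerOptions before calling armnn::Optimize. A minimal sketch (the network and runtime variables are illustrative, assuming an already-parsed INetwork and a created IRuntime):

    // Enable the new option and pass it to the optimizer.
    armnn::OptimizerOptions optOptions;
    optOptions.m_AllowExpandedDims = true;

    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, {armnn::Compute::CpuAcc},
                        runtime->GetDeviceSpec(), optOptions);
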
diff --git a/include/armnn/backends/OptimizationViews.hpp b/include/armnn/backends/OptimizationViews.hpp
index a7714ee..110c5f7 100644
--- a/include/armnn/backends/OptimizationViews.hpp
+++ b/include/armnn/backends/OptimizationViews.hpp
@@ -13,7 +13,7 @@
 class OptimizationViews
 {
 public:
-    OptimizationViews(NetworkOptions networkOptions = {}) : m_INetwork(INetwork::Create(networkOptions)) {}
+    OptimizationViews(const NetworkOptions& networkOptions = {}) : m_INetwork(INetwork::Create(networkOptions)) {}
     OptimizationViews(const OptimizationViews&) = delete;
     OptimizationViews& operator=(const OptimizationViews&) = delete;
     OptimizationViews(OptimizationViews&&) = default;
@@ -72,7 +72,7 @@
     /// INetworkPtr object used only as a container for any layer generated by the optimization process.
     /// It can also be used to AddPrecompiledLayer to the SubstitutionPair.
     /// Use in favour of m_Graph, which is deprecated in 23.08.
-    INetworkPtr m_INetwork = INetwork::Create();;
+    INetworkPtr m_INetwork = INetwork::Create();
 };
 
 } //namespace armnn
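
NetworkOptions and ModelOptions are both aliases for std::vector<armnn::BackendOptions>, so a backend can forward the model options it receives into the OptimizationViews container; the INetwork created inside it then inherits AllowExpandedDims and ShapeInferenceMethod. A hypothetical sketch (MakeOptimizationViews is an illustrative helper, not part of the backend API):

    // Forward model options so the substitution INetwork inherits them.
    armnn::OptimizationViews MakeOptimizationViews(const armnn::ModelOptions& modelOptions)
    {
        return armnn::OptimizationViews(modelOptions);
    }
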
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index 482d927..1b87751 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -119,12 +119,12 @@
         m_LayersInOrder = std::move(other.m_LayersInOrder);
         m_Views         = std::move(other.m_Views);
         m_Profiler      = std::move(other.m_Profiler);
+        m_AllowExpandedDims    = other.m_AllowExpandedDims;
+        m_ShapeInferenceMethod = other.m_ShapeInferenceMethod;
         other.ForEachLayer([this](Layer* otherLayer)
         {
             otherLayer->Reparent(*this, m_Layers.end());
         });
-        m_AllowExpandedDims    = other.m_AllowExpandedDims;
-        m_ShapeInferenceMethod = other.m_ShapeInferenceMethod;
 
         ARMNN_ASSERT(other.m_PosInGraphMap.empty());
         ARMNN_ASSERT(other.m_Layers.empty());
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 5930805..1b1815f 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -442,12 +442,12 @@
     return pNetworkImpl->ExecuteStrategy(strategy);
 }
 
-armnn::INetwork* INetwork::CreateRaw(NetworkOptions networkOptions)
+armnn::INetwork* INetwork::CreateRaw(const NetworkOptions& networkOptions)
 {
     return new INetwork(networkOptions);
 }
 
-armnn::INetworkPtr INetwork::Create(NetworkOptions networkOptions)
+armnn::INetworkPtr INetwork::Create(const NetworkOptions& networkOptions)
 {
     return INetworkPtr(CreateRaw(networkOptions), &INetwork::Destroy);
 }
@@ -1879,7 +1879,7 @@
     return allowExpandedDims;
 }
 
-NetworkImpl::NetworkImpl(NetworkOptions networkOptions)
+NetworkImpl::NetworkImpl(const NetworkOptions& networkOptions)
 : m_NetworkOptions(networkOptions),
   m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod(), GetAllowExpandedDims()))
 {}
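
NetworkImpl now reads both settings out of the NetworkOptions through the helpers above, so the Graph is created with them from the start. A minimal sketch of the option groups those helpers look for, mirroring the group and option names used in ArmNNExecutor.cpp below:

    armnn::BackendOptions allowExpandedDims("AllowExpandedDims",
                                            {{ "AllowExpandedDims", true }});
    armnn::BackendOptions shapeInference("ShapeInferenceMethod",
                                         {{ "InferAndValidate", true }});

    armnn::NetworkOptions networkOptions;
    networkOptions.push_back(allowExpandedDims);
    networkOptions.push_back(shapeInference);

    // The Graph inside the returned network is constructed with both settings applied.
    armnn::INetworkPtr network = armnn::INetwork::Create(networkOptions);
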
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 8bd56d3..5ca16e2 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -31,7 +31,7 @@
 class NetworkImpl
 {
 public:
-    NetworkImpl(NetworkOptions networkOptions = {});
+    NetworkImpl(const NetworkOptions& networkOptions = {});
     ~NetworkImpl();
 
     const Graph& GetGraph() const
diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp
index 4d63b48..797c09a 100644
--- a/tests/ExecuteNetwork/ArmNNExecutor.cpp
+++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp
@@ -514,6 +514,7 @@
                                      armnn::ShapeInferenceMethod::InferAndValidate :
                                      armnn::ShapeInferenceMethod::ValidateOnly;
     options.m_ProfilingEnabled = m_Params.m_EnableProfiling;
+    options.m_AllowExpandedDims = m_Params.m_AllowExpandedDims;
 
     armnn::BackendOptions gpuAcc("GpuAcc",
                                  {
@@ -530,6 +531,19 @@
                                  });
     options.m_ModelOptions.push_back(gpuAcc);
     options.m_ModelOptions.push_back(cpuAcc);
+    // The shapeInferenceMethod and allowExpandedDims values also have to be added to the model
+    // options, because the model options are what gets passed to the OptimizeSubgraphViews method,
+    // which uses them to create the new optimized INetwork.
+    armnn::BackendOptions allowExDimOpt("AllowExpandedDims",
+                                        {
+                                                { "AllowExpandedDims", m_Params.m_AllowExpandedDims }
+                                        });
+    options.m_ModelOptions.push_back(allowExDimOpt);
+    armnn::BackendOptions shapeInferOpt("ShapeInferenceMethod",
+                                        {
+                                                { "InferAndValidate", m_Params.m_InferOutputShape }
+                                        });
+    options.m_ModelOptions.push_back(shapeInferOpt);
 
     const auto optimization_start_time = armnn::GetTimeNow();
     optNet = armnn::Optimize(*network, m_Params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
@@ -758,6 +772,7 @@
     armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions options;
     options.m_StandInLayerForUnsupported = params.m_ParseUnsupported;
     options.m_InferAndValidate = params.m_InferOutputShape;
+    options.m_AllowExpandedDims = params.m_AllowExpandedDims;
 
     m_Parser = armnnTfLiteParser::ITfLiteParser::Create(options);
 }
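
On the parser side the new flag travels the same way. A minimal sketch of setting it directly (the model path is illustrative):

    armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions parserOptions;
    parserOptions.m_AllowExpandedDims = true;  // keep size-1 dims out of size calculations

    auto parser = armnnTfLiteParser::ITfLiteParser::Create(parserOptions);
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");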