IVGCVSW-6929 Support for models with implicit expanded dimensions
* Added allow-expanded-dims to TFLite parser and ArmNN delegate
* If true, ArmNN will disregard dimensions with a size of 1 when
validating tensor shapes; the total number of elements must still match.
* This allows us to support models where tensors have expanded
dimensions, i.e. extra dimensions with a size of 1 (for example, a
shape of [1, 2, 2] is treated as compatible with [2, 2]).
* Fixed a bug in Network where it assumed that the ShapeInferenceMethod
could only appear as the first option.
* Fixed a bug where m_ShapeInferenceMethod was lost when copying or
moving Graphs.
* Changed Delegate to pass "infer-output-shape", "allow-expanded-dims"
and other BackendOptions through to the Network during construction.
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ibe7c5ae6597796fc9164cb07bd372bd7f8f8cacf
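
Note: infer-output-shape and allow-expanded-dims are mutually exclusive,
and ExecuteNetwork now rejects the combination (see the check below).
As a rough sketch (not part of this patch), the new option can also be
supplied directly through the C++ API as a ModelOption when optimizing
a network; the surrounding network/runtime setup is assumed:

    armnn::OptimizerOptions optimizerOptions;
    armnn::BackendOptions allowExpandedDims("AllowExpandedDims",
    {
        {"AllowExpandedDims", true}
    });
    optimizerOptions.m_ModelOptions.push_back(allowExpandedDims);
    // Pass optimizerOptions to armnn::Optimize(...) as usual.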
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index ddabf3c..f0a3d08 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -389,6 +389,7 @@
// Creates an InferenceModel, which will parse the model and load it into an IRuntime.
typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
inferenceModelParams.m_ModelPath = params.m_ModelPath;
+ inferenceModelParams.m_AllowExpandedDims = params.m_AllowExpandedDims;
inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index b3d18cd..cc75bb4 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -232,6 +232,11 @@
{
ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
}
+
+ if (m_AllowExpandedDims && m_InferOutputShape)
+ {
+ throw armnn::InvalidArgumentException("infer-output-shape and allow-expanded-dims cannot be used together.");
+ }
}
#if defined(ARMNN_TFLITE_DELEGATE)
@@ -277,6 +282,22 @@
options.m_ModelOptions.push_back(gpuAcc);
options.m_ModelOptions.push_back(cpuAcc);
+ if (m_InferOutputShape)
+ {
+ armnn::BackendOptions networkOption("ShapeInferenceMethod",
+ {
+ {"InferAndValidate", true}
+ });
+ options.m_ModelOptions.push_back(networkOption);
+ }
+ if (m_AllowExpandedDims)
+ {
+ armnn::BackendOptions networkOption("AllowExpandedDims",
+ {
+ {"AllowExpandedDims", true}
+ });
+ options.m_ModelOptions.push_back(networkOption);
+ }
delegateOptions.SetOptimizerOptions(options);
// If v,visualize-optimized-model is enabled then construct a file name for the dot file.
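
For context, a BackendOptions entry like the ones added above can be
read back on the consuming side roughly as follows (sketch using the
accessors from armnn/BackendOptions.hpp; the enclosing Network code is
assumed):

    // Scan one BackendOptions group for the AllowExpandedDims flag.
    bool GetAllowExpandedDims(const armnn::BackendOptions& networkOption)
    {
        for (size_t i = 0; i < networkOption.GetOptionCount(); ++i)
        {
            const auto& option = networkOption.GetOption(i);
            if (option.GetName() == "AllowExpandedDims" && option.GetValue().IsBool())
            {
                return option.GetValue().AsBool();
            }
        }
        return false; // default: strict shape validation
    }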
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index 04a0733..5ef2b6e 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -25,6 +25,7 @@
TfliteInterpreter
};
+ bool m_AllowExpandedDims;
std::string m_CachedNetworkFilePath;
std::vector<armnn::BackendId> m_ComputeDevices;
bool m_Concurrent;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index c84c79e..ad35092 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -228,6 +228,13 @@
"parser)",
cxxopts::value<bool>(m_ExNetParams.m_InferOutputShape)->default_value("false")->implicit_value("true"))
+ ("allow-expanded-dims",
+ "If true will disregard dimensions with a size of 1 when validating tensor shapes. Tensor sizes must "
+ "still match. This is an Experimental parameter that is incompatible with infer-output-shape. "
+ "This parameter may be removed in a later update. ",
+ cxxopts::value<bool>(m_ExNetParams.m_AllowExpandedDims)->default_value("false")
+ ->implicit_value("true"))
+
("iterations",
"Number of iterations to run the network for, default is set to 1. "
"If you wish to run the model with different input data for every execution you can do so by "
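
Example invocation with the new flag (model path hypothetical):

    ExecuteNetwork --model-path my_model.tflite --compute CpuAcc --allow-expanded-dims

Because the option declares implicit_value("true"), passing the bare
flag is enough to enable the relaxed shape validation.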