IVGCVSW-4980 Introduce InferAndValidate option to ExecuteNetwork for parsers

* Introduced the infer-output-shape option for the TfLiteParser in the ExecuteNetwork app

!armnn:3591

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I30bd5e51ac2b6759169e22a44586fd97986f2402
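
Sketch of how the new option reaches the TfLite parser, mirroring the InferenceModel.hpp
change below. The switch defaults to false and is enabled on the command line with
--infer-output-shape; the model path and option values in this snippet are illustrative
only and are not part of this change:

    #include <armnn/INetwork.hpp>
    #include <armnnTfLiteParser/ITfLiteParser.hpp>

    armnn::INetworkPtr CreateNetwork(bool inferOutputShape)
    {
        // Forward the ExecuteNetwork switch into the parser options
        // (illustrative values; the real wiring lives in InferenceModel.hpp).
        armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions options;
        options.m_StandInLayerForUnsupported = false;
        options.m_InferAndValidate           = inferOutputShape;

        auto parser = armnnTfLiteParser::ITfLiteParser::Create(options);
        return parser->CreateNetworkFromBinaryFile("model.tflite"); // hypothetical model path
    }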
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 9b79c8c..f2763a7 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -134,7 +134,10 @@
              "Available options are: 1 (Rapid), 2 (Normal), 3 (Exhaustive). "
              "Requires tuning-path to be set, default is set to 0 (No tuning run)")
             ("parse-unsupported", po::bool_switch()->default_value(false),
-                "Add unsupported operators as stand-in layers (where supported by parser)");
+                "Add unsupported operators as stand-in layers (where supported by parser)")
+            ("infer-output-shape", po::bool_switch()->default_value(false),
+                "Infers output tensor shape from input tensor shape and validate where applicable (where supported by "
+                "parser)");
     }
     catch (const std::exception& e)
     {
@@ -183,6 +186,7 @@
     bool fileOnlyExternalProfiling = vm["file-only-external-profiling"].as<bool>();
     bool parseUnsupported = vm["parse-unsupported"].as<bool>();
     bool timelineEnabled = vm["timeline-profiling"].as<bool>();
+    bool inferOutputShape = vm["infer-output-shape"].as<bool>();
 
     if (enableBf16TurboMode && enableFp16TurboMode)
     {
@@ -245,7 +249,8 @@
                 testCase.values.insert(testCase.values.begin(), executableName);
                 results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime),
                                              enableProfiling, enableFp16TurboMode, enableBf16TurboMode, thresholdTime,
-                                             printIntermediate, enableLayerDetails, parseUnsupported));
+                                             printIntermediate, enableLayerDetails, parseUnsupported,
+                                             inferOutputShape));
             }
 
             // Check results
@@ -265,7 +270,7 @@
                 testCase.values.insert(testCase.values.begin(), executableName);
                 if (RunCsvTest(testCase, runtime, enableProfiling,
                                enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate,
-                               enableLayerDetails, parseUnsupported) != EXIT_SUCCESS)
+                               enableLayerDetails, parseUnsupported, inferOutputShape) != EXIT_SUCCESS)
                 {
                     return EXIT_FAILURE;
                 }
@@ -298,7 +303,7 @@
                     dynamicBackendsPath, modelPath, inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput,
                     outputTypes, outputNames, outputTensorFiles, dequantizeOutput, enableProfiling,
                     enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate, subgraphId,
-                    enableLayerDetails, parseUnsupported);
+                    enableLayerDetails, parseUnsupported, inferOutputShape);
             }
             ARMNN_LOG(info) << "Using tuning params: " << tuningPath << "\n";
             options.m_BackendOptions.emplace_back(
@@ -330,6 +335,7 @@
         return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath,
             inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
             outputTensorFiles, dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode,
-            thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported, iterations, runtime);
+            thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported, inferOutputShape,
+            iterations, runtime);
     }
 }
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 5588d55..68ee8ae 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -95,6 +95,7 @@
     bool                            m_EnableBf16TurboMode;
     bool                            m_PrintIntermediateLayers;
     bool                            m_ParseUnsupported;
+    bool                            m_InferOutputShape;
 
     Params()
         : m_ComputeDevices{}
@@ -105,6 +106,7 @@
         , m_EnableBf16TurboMode(false)
         , m_PrintIntermediateLayers(false)
         , m_ParseUnsupported(false)
+        , m_InferOutputShape(false)
     {}
 };
 
@@ -241,6 +243,7 @@
         // Create a network from a file on disk
         IParser::TfLiteParserOptions options;
         options.m_StandInLayerForUnsupported = params.m_ParseUnsupported;
+        options.m_InferAndValidate           = params.m_InferOutputShape;
         auto parser(IParser::Create(options));
 
         armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 31f3791..69941d5 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include <armnn/ArmNN.hpp>
@@ -375,6 +375,7 @@
     bool                          m_EnableLayerDetails = false;
     bool                          m_GenerateTensorData;
     bool                          m_ParseUnsupported = false;
+    bool                          m_InferOutputShape = false;
 };
 
 template<typename TParser, typename TDataType>
@@ -397,6 +398,7 @@
         inferenceModelParams.m_PrintIntermediateLayers        = params.m_PrintIntermediate;
         inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
         inferenceModelParams.m_ParseUnsupported               = params.m_ParseUnsupported;
+        inferenceModelParams.m_InferOutputShape               = params.m_InferOutputShape;
 
         for(const std::string& inputName: params.m_InputNames)
         {
@@ -550,6 +552,7 @@
             const size_t subgraphId,
             bool enableLayerDetails = false,
             bool parseUnsupported = false,
+            bool inferOutputShape = false,
             const size_t iterations = 1,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
 {
@@ -678,6 +681,7 @@
     params.m_EnableLayerDetails       = enableLayerDetails;
     params.m_GenerateTensorData       = inputTensorDataFilePathsVector.empty();
     params.m_ParseUnsupported         = parseUnsupported;
+    params.m_InferOutputShape         = inferOutputShape;
 
     // Warn if ExecuteNetwork will generate dummy input data
     if (params.m_GenerateTensorData)
@@ -749,7 +753,7 @@
 int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IRuntime>& runtime,
                const bool enableProfiling, const bool enableFp16TurboMode, const bool enableBf16TurboMode,
                const double& thresholdTime, const bool printIntermediate, bool enableLayerDetails = false,
-               bool parseUnuspported = false)
+               bool parseUnuspported = false, bool inferOutputShape = false)
 {
     IgnoreUnused(runtime);
     std::string modelFormat;
@@ -869,7 +873,8 @@
     return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                    inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles,
                    dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode,
-                   thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnuspported);
+                   thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnuspported,
+                   inferOutputShape);
 }
 
 #if defined(ARMCOMPUTECL_ENABLED)
@@ -895,7 +900,8 @@
             bool printIntermediate,
             const size_t subgraphId,
             bool enableLayerDetails = false,
-            bool parseUnsupported = false)
+            bool parseUnsupported = false,
+            bool inferOutputShape = false)
 {
     armnn::IRuntime::CreationOptions options;
     options.m_BackendOptions.emplace_back(
@@ -917,7 +923,8 @@
     int state = RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                         inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
                         outputTensorFiles, dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode,
-                        thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported, 1, runtime);
+                        thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported,
+                        inferOutputShape, 1, runtime);
 
     ARMNN_LOG(info) << "Tuning time: " << std::setprecision(2)
                     << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";