Revert "IVGCVSW-6650 Refactor ExecuteNetwork"

This reverts commit 615e06f54a4c4139e81e289991ba4084aa2f69d3.

Reason for revert: Breaking nightlies and tests.

Change-Id: I06a4a0119463188a653bb749033f78514645bd0c
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index 104c1c5..5ef2b6e 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -16,6 +16,8 @@
 /// Check ExecuteNetworkProgramOptions.cpp for a description of each parameter
 struct ExecuteNetworkParams
 {
+    using TensorShapePtr = std::unique_ptr<armnn::TensorShape>;
+
     enum class TfLiteExecutor
     {
         ArmNNTfLiteParser,
@@ -23,49 +25,50 @@
         TfliteInterpreter
     };
 
-    bool                              m_AllowExpandedDims;
-    std::string                       m_CachedNetworkFilePath;
-    std::vector<armnn::BackendId>     m_ComputeDevices;
-    bool                              m_Concurrent;
-    bool                              m_DequantizeOutput;
-    std::string                       m_DynamicBackendsPath;
-    bool                              m_EnableBf16TurboMode;
-    bool                              m_EnableFastMath = false;
-    bool                              m_EnableFp16TurboMode;
-    bool                              m_EnableLayerDetails = false;
-    bool                              m_EnableProfiling;
-    bool                              m_GenerateTensorData;
-    bool                              m_InferOutputShape = false;
-    bool                              m_EnableDelegate = false;
-    bool                              m_IsModelBinary;
-    std::vector<std::string>          m_InputNames;
-    std::vector<std::string>          m_InputTensorDataFilePaths;
-    std::vector<armnn::TensorShape>   m_InputTensorShapes;
-    size_t                            m_Iterations;
-    std::string                       m_ModelPath;
-    unsigned int                      m_NumberOfThreads;
-    bool                              m_OutputDetailsToStdOut;
-    bool                              m_OutputDetailsOnlyToStdOut;
-    std::vector<std::string>          m_OutputNames;
-    std::vector<std::string>          m_OutputTensorFiles;
-    bool                              m_ParseUnsupported = false;
-    bool                              m_PrintIntermediate;
-    bool                              m_DontPrintOutputs;
-    bool                              m_QuantizeInput;
-    bool                              m_SaveCachedNetwork;
-    size_t                            m_SubgraphId;
-    double                            m_ThresholdTime;
-    int                               m_TuningLevel;
-    std::string                       m_TuningPath;
-    std::string                       m_MLGOTuningFilePath;
-    TfLiteExecutor                    m_TfLiteExecutor;
-    size_t                            m_ThreadPoolSize;
-    bool                              m_ImportInputsIfAligned;
-    bool                              m_ReuseBuffers;
+    bool                          m_AllowExpandedDims;
+    std::string                   m_CachedNetworkFilePath;
+    std::vector<armnn::BackendId> m_ComputeDevices;
+    bool                          m_Concurrent;
+    bool                          m_DequantizeOutput;
+    std::string                   m_DynamicBackendsPath;
+    bool                          m_EnableBf16TurboMode;
+    bool                          m_EnableFastMath = false;
+    bool                          m_EnableFp16TurboMode;
+    bool                          m_EnableLayerDetails = false;
+    bool                          m_EnableProfiling;
+    bool                          m_GenerateTensorData;
+    bool                          m_InferOutputShape = false;
+    bool                          m_EnableDelegate = false;
+    std::vector<std::string>      m_InputNames;
+    std::vector<std::string>      m_InputTensorDataFilePaths;
+    std::vector<TensorShapePtr>   m_InputTensorShapes;
+    std::vector<std::string>      m_InputTypes;
+    bool                          m_IsModelBinary;
+    size_t                        m_Iterations;
+    std::string                   m_ModelFormat;
+    std::string                   m_ModelPath;
+    unsigned int                  m_NumberOfThreads;
+    bool                          m_OutputDetailsToStdOut;
+    bool                          m_OutputDetailsOnlyToStdOut;
+    std::vector<std::string>      m_OutputNames;
+    std::vector<std::string>      m_OutputTensorFiles;
+    std::vector<std::string>      m_OutputTypes;
+    bool                          m_ParseUnsupported = false;
+    bool                          m_PrintIntermediate;
+    bool                          m_DontPrintOutputs;
+    bool                          m_QuantizeInput;
+    bool                          m_SaveCachedNetwork;
+    size_t                        m_SimultaneousIterations;
+    size_t                        m_SubgraphId;
+    double                        m_ThresholdTime;
+    int                           m_TuningLevel;
+    std::string                   m_TuningPath;
+    std::string                   m_MLGOTuningFilePath;
+    TfLiteExecutor                m_TfLiteExecutor;
+    size_t                        m_ThreadPoolSize;
+    bool                          m_ImportInputsIfAligned;
+    bool                          m_ReuseBuffers;
 
-    std::string                       m_ComparisonFile;
-    std::vector<armnn::BackendId>     m_ComparisonComputeDevices;
-    bool                              m_CompareWithTflite;
     // Ensures that the parameters for ExecuteNetwork fit together
     void ValidateParams();