IVGCVSW-1733 - Set enableGpuProfiling in the Runtime's CreationOptions when the -e flag is set
(false by default). The flag is now used globally, in the same way as the concurrent flag: the -e
option has been removed from the RunCsvTest options, and the value is passed in from main instead.

Change-Id: I246c2c40b1a113b896be0d41aba528e79ecdba0d
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 2e61dfa..ee20747 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -341,7 +341,7 @@
 }
 
 int RunCsvTest(const armnnUtils::CsvRow &csvRow,
-               const std::shared_ptr<armnn::IRuntime>& runtime)
+               const std::shared_ptr<armnn::IRuntime>& runtime, const bool enableProfiling)
 {
     std::string modelFormat;
     std::string modelPath;
@@ -370,9 +370,7 @@
          "This parameter is optional, depending on the network.")
         ("input-tensor-data,d", po::value(&inputTensorDataFilePath),
          "Path to a file containing the input data as a flat array separated by whitespace.")
-        ("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.")
-        ("event-based-profiling,e", po::bool_switch()->default_value(false),
-         "Enables built in profiler. If unset, defaults to off.");
+        ("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.");
     }
     catch (const std::exception& e)
     {
@@ -415,9 +413,6 @@
     boost::trim(inputTensorDataFilePath);
     boost::trim(outputName);
 
-    // Get the value of the switch arguments.
-    bool enableProfiling = vm["event-based-profiling"].as<bool>();
-
     // Get the preferred order of compute devices.
     std::vector<armnn::Compute> computeDevices = vm["compute"].as<std::vector<armnn::Compute>>();
 
@@ -546,6 +541,8 @@
 
         // Create runtime
         armnn::IRuntime::CreationOptions options;
+        options.m_EnableGpuProfiling = enableProfiling;
+
         std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(options));
 
         const std::string executableName("ExecuteNetwork");
@@ -560,7 +557,8 @@
             for (auto&  testCase : testCases)
             {
                 testCase.values.insert(testCase.values.begin(), executableName);
-                results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime)));
+                results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime),
+                                             enableProfiling));
             }
 
             // Check results
@@ -578,7 +576,7 @@
             for (auto&  testCase : testCases)
             {
                 testCase.values.insert(testCase.values.begin(), executableName);
-                if (RunCsvTest(testCase, runtime) != EXIT_SUCCESS)
+                if (RunCsvTest(testCase, runtime, enableProfiling) != EXIT_SUCCESS)
                 {
                     return EXIT_FAILURE;
                 }
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 6b81f80..2e0aff9 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -241,6 +241,7 @@
         else
         {
             armnn::IRuntime::CreationOptions options;
+            options.m_EnableGpuProfiling = m_EnableProfiling;
             m_Runtime = std::move(armnn::IRuntime::Create(options));
         }