IVGCVSW-6009 Enable creating thread pool with 1 thread

* Allow the user to create a thread pool with a single thread (see the sketch below)
* This is in keeping with how the android-nn-driver was implemented
* Apply the same rule to the ExecuteNetwork thread pool creation
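
For reference, a minimal sketch of how a single-thread pool could be created
through the Arm NN API. This assumes the experimental Threadpool interface
from include/armnn/Threadpool.hpp (constructor taking the thread count, a
runtime pointer and pre-created working memory handles); the variable names
and exact signatures here are illustrative, not taken from this patch.

    #include <armnn/Threadpool.hpp>

    // 'runtime' (an armnn::IRuntimePtr) and 'networkId' (a network already
    // loaded with async execution enabled) are assumed to exist; only the
    // pool creation is shown.
    std::vector<std::shared_ptr<armnn::experimental::IWorkingMemHandle>> memHandles;
    memHandles.push_back(runtime->CreateWorkingMemHandle(networkId));

    // A pool with a single worker thread is now accepted, matching the
    // android-nn-driver behaviour.
    armnn::experimental::Threadpool threadpool(1, runtime.get(), memHandles);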

Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I05b8048a9e0e45ae11d2b585080af28d9d008d81
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index cd760a8..e8d5b18 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -440,7 +440,7 @@
             }
         }
         // Asynchronous execution using the Arm NN thread pool
-        else if (params.m_ThreadPoolSize >= 2)
+        else if (params.m_ThreadPoolSize >= 1)
         {
             try
             {
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index 189ece2..4002e89 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -137,14 +137,6 @@
 
         CheckModelFormat(m_ModelFormat);
 
-        // Check number of simultaneous iterations
-        // Testing std::launch::async with a single iteration is possible if concurrent is manually set
-        if ((m_SimultaneousIterations <= 1 && m_ThreadPoolSize > 1) ||
-            (m_SimultaneousIterations <= 1 && !m_Concurrent))
-        {
-            ARMNN_LOG(fatal) << "simultaneous-iterations cannot be less than 2.";
-        }
-
         // Check input tensor shapes
         if ((m_InputTensorShapes.size() != 0) &&
             (m_InputTensorShapes.size() != m_InputNames.size()))
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 1f57f85..25ddecf 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -292,8 +292,8 @@
 
                 ("thread-pool-size",
                  "Number of Arm NN threads to use when running the network asynchronously via the Arm NN thread pool. "
-                 "The default is set to 1",
-                 cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("1"));
+                 "The default is set to 0",
+                 cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("0"));
 
         m_CxxOptions.add_options("c) Optimization")
                 ("bf16-turbo-mode",
@@ -461,7 +461,7 @@
     }
 
     // Set concurrent to true if the user expects to run inferences asynchronously
-    if (m_ExNetParams.m_SimultaneousIterations > 1)
+    if (m_ExNetParams.m_SimultaneousIterations > 1 || m_ExNetParams.m_ThreadPoolSize > 0)
     {
         m_ExNetParams.m_Concurrent = true;
     }