IVGCVSW-6255 Investigate and fix running mobilebert with the TfLiteDelegate (CpuRef)

 * Fixed bug occurring in Ref Gather Workload.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I3ee79f475fd9909bfbd4afb58f698439f26d6d65
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index f0a3d08..153fe5b 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -110,16 +110,14 @@
         std::cout << "Running on TfLite without ArmNN delegate\n";
     }
 
-    // Load (or generate) input data for inference
-    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
-                                            ? armnn::EmptyOptional()
-                                            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);
-
     const size_t numInputs = params.m_InputNames.size();
-
     // Populate input tensor of interpreter
     for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
     {
+        // Load (or generate) input data for inference
+        armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ? armnn::EmptyOptional() :
+            armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[inputIndex]);
+
         int input = tfLiteInterpreter->inputs()[inputIndex];
         TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;