Fix ExecuteNetwork no longer printing the inference results,
which was breaking some of the nightly builds

 * The TensorPrinter must be called regardless of whether any output
   tensor files were specified; it automatically handles an empty
   file path by printing the results only to the console (see the
   sketch below)
 * Code refactoring
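
A minimal sketch of the behaviour the fix relies on (the
SketchTensorPrinter type below is hypothetical and simplified; the
real TensorPrinter lives in
tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp and is applied
to the output containers via boost::apply_visitor, as in the diff):

    #include <fstream>
    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for TensorPrinter: always prints the
    // tensor values to the console, and additionally writes them to
    // a file only when a non-empty path was supplied.
    struct SketchTensorPrinter
    {
        std::string m_OutputTensorFile; // empty path => console only

        void operator()(const std::vector<float>& values) const
        {
            // Always print the tensor values to the console
            for (float v : values)
            {
                std::cout << v << " ";
            }
            std::cout << std::endl;

            // Additionally write to a file only when a path was given
            if (!m_OutputTensorFile.empty())
            {
                std::ofstream out(m_OutputTensorFile);
                for (float v : values)
                {
                    out << v << " ";
                }
            }
        }
    };

    int main()
    {
        // Empty file path: results go to the console only, which is
        // what the nightly builds consume
        SketchTensorPrinter printer{""};
        printer({0.1f, 0.2f, 0.3f});
    }

Because the empty-path case degrades gracefully to console output,
the printer can be invoked unconditionally after every inference run.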

Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
Change-Id: I548ec7cf6d51badf78643c9a6c1c56ea9200142b
diff --git a/src/armnnUtils/TensorIOUtils.hpp b/src/armnnUtils/TensorIOUtils.hpp
index 47e0a32..07f3723 100644
--- a/src/armnnUtils/TensorIOUtils.hpp
+++ b/src/armnnUtils/TensorIOUtils.hpp
@@ -37,11 +37,10 @@
                              {
                                  if (value.size() != inputBinding.second.GetNumElements())
                                  {
-                                    std::ostringstream msg;
-                                    msg << "Input tensor has incorrect size (expected "
-                                        << inputBinding.second.GetNumElements() << " got "
-                                        << value.size();
-                                    throw armnn::Exception(msg.str());
+                                    throw armnn::Exception(boost::str(boost::format("Input tensor has incorrect size "
+                                                                                    "(expected %1% got %2%)")
+                                                                      % inputBinding.second.GetNumElements()
+                                                                      % value.size()));
                                  }
 
                                  armnn::ConstTensor inputTensor(inputBinding.second, value.data());
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 004e9fb..2556a10 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -471,24 +471,20 @@
         // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
         auto inference_duration = model.Run(inputDataContainers, outputDataContainers);
 
-        // Print output tensors (if requested)
-        if (!params.m_OutputTensorFiles.empty())
+        if (params.m_GenerateTensorData)
         {
-            if (params.m_GenerateTensorData)
-            {
-                BOOST_LOG_TRIVIAL(warning) << "Requested to write output to file, although the input was generated. "
-                                           << "Note that the output will not be useful.";
-            }
+            BOOST_LOG_TRIVIAL(warning) << "The input data was generated; note that the output will not be useful";
+        }
 
-            const auto& infosOut = model.GetOutputBindingInfos();
-            for (size_t i = 0; i < numOutputs; i++)
-            {
-                const armnn::TensorInfo& infoOut = infosOut[i].second;
-                auto outputTensorFile = params.m_OutputTensorFiles[i];
+        // Print output tensors
+        const auto& infosOut = model.GetOutputBindingInfos();
+        for (size_t i = 0; i < numOutputs; i++)
+        {
+            const armnn::TensorInfo& infoOut = infosOut[i].second;
+            auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];
 
-                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i], infoOut, outputTensorFile);
-                boost::apply_visitor(printer, outputDataContainers[i]);
-            }
+            TensorPrinter printer(inferenceModelParams.m_OutputBindings[i], infoOut, outputTensorFile);
+            boost::apply_visitor(printer, outputDataContainers[i]);
         }
 
         BOOST_LOG_TRIVIAL(info) << "\nInference time: " << std::setprecision(2)