MLCE-545 INT8 TFLite model execution abnormal

 * Add functionality to print output tensors to files in the system temp directory
 * Add unit tests

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: Idfb4c186544187db1fecdfca11c662540f645439
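
For context, a rough sketch of how the new option is expected to be switched on; the network/runtime setup is assumed to exist already, and the exact interplay of m_Debug and m_DebugToFile is illustrative rather than confirmed by this patch:

    // Sketch: enabling debug-to-file output via OptimizerOptions.
    // `network`, `backends` and `runtime` are assumed to be set up elsewhere.
    armnn::OptimizerOptions options;
    options.m_Debug       = true;  // insert debug layers after each layer
    options.m_DebugToFile = true;  // added by this patch: dump tensors to files in tempdir

    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, backends, runtime->GetDeviceSpec(), options);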
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 0289a90..687f2c3 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -129,6 +129,7 @@
     OptimizerOptions()
         : m_ReduceFp32ToFp16(false)
         , m_Debug(false)
+        , m_DebugToFile(false)
         , m_ReduceFp32ToBf16(false)
         , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
         , m_ImportEnabled(false)
@@ -139,9 +140,10 @@
     {}
 
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
-                     ModelOptions modelOptions = {}, bool exportEnabled = false)
+                     ModelOptions modelOptions = {}, bool exportEnabled = false, bool debugToFile = false)
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
+        , m_DebugToFile(debugToFile)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
         , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
         , m_ImportEnabled(importEnabled)
@@ -159,9 +161,10 @@
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
                      ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
                      bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false,
-                     bool allowExpandedDims = false)
+                     bool debugToFile = false, bool allowExpandedDims = false)
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
+        , m_DebugToFile(debugToFile)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
         , m_shapeInferenceMethod(shapeInferenceMethod)
         , m_ImportEnabled(importEnabled)
@@ -183,6 +186,7 @@
         stream << "\tReduceFp32ToFp16: " << m_ReduceFp32ToFp16 << "\n";
         stream << "\tReduceFp32ToBf16: " << m_ReduceFp32ToBf16 << "\n";
         stream << "\tDebug: " << m_Debug << "\n";
+        stream << "\tDebug to file: " << m_DebugToFile << "\n";
         stream << "\tShapeInferenceMethod: " <<
         (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ? "ValidateOnly" : "InferAndValidate") << "\n";
         stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
@@ -215,6 +219,9 @@
     // Add debug data for easier troubleshooting
     bool m_Debug;
 
+    // Write debug data to separate output files for easier troubleshooting
+    bool m_DebugToFile;
+
     /// Reduces all Fp32 operators in the model to Bf16 for faster processing.
     /// @Note This feature works best if all operators of the model are in Fp32. ArmNN will add conversion layers
     ///       between layers that weren't in Fp32 in the first place or if the operator is not supported in Bf16.
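
Since debugToFile is appended with a default value, existing positional callers of both constructors compile unchanged. A caller opting in through the first extended constructor might look like this (argument names annotated purely for readability):

    armnn::OptimizerOptions opts(
        /*reduceFp32ToFp16=*/ false,
        /*debug=*/            true,
        /*reduceFp32ToBf16=*/ false,
        /*importEnabled=*/    false,
        /*modelOptions=*/     {},
        /*exportEnabled=*/    false,
        /*debugToFile=*/      true); // new trailing parameter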
diff --git a/include/armnn/backends/WorkloadData.hpp b/include/armnn/backends/WorkloadData.hpp
index 214ea7b..bd2b3ec 100644
--- a/include/armnn/backends/WorkloadData.hpp
+++ b/include/armnn/backends/WorkloadData.hpp
@@ -522,6 +522,8 @@
     LayerGuid m_Guid;
     std::string m_LayerName;
     unsigned int m_SlotIndex;
+
+    bool m_LayerOutputToFile = false;
 };
 
 struct RsqrtQueueDescriptor : QueueDescriptor
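
Judging by the surrounding members this is DebugQueueDescriptor, so the new flag gives the backend Debug workload a per-layer switch between stdout and file output. A hypothetical sketch of that branch (helper bodies omitted, names invented for illustration):

    void HandleDebugOutput(const armnn::DebugQueueDescriptor& descriptor)
    {
        if (descriptor.m_LayerOutputToFile)
        {
            // serialise the layer's output tensor to a file in the temp directory
        }
        else
        {
            // print the layer's output tensor to stdout, as before
        }
    }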
diff --git a/include/armnnUtils/Filesystem.hpp b/include/armnnUtils/Filesystem.hpp
index 0d29a75..00da50f 100644
--- a/include/armnnUtils/Filesystem.hpp
+++ b/include/armnnUtils/Filesystem.hpp
@@ -19,9 +19,16 @@
 namespace Filesystem
 {
 
+using FileContents = std::string;
+
 /// Returns a path to a file in the system temporary folder. If the file existed it will be deleted.
 fs::path NamedTempFile(const char* fileName);
 
+/// Creates the given directory in the system temporary folder and returns its full path
+std::string CreateDirectory(std::string sPath);
+
+/// Returns the contents of the file at the given path as a string
+FileContents ReadFileContentsIntoString(const std::string path);
+
 } // namespace Filesystem
 } // namespace armnnUtils
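
The two new helpers pair naturally in the unit tests: create a scratch directory under the system temp folder, let the debug layers write into it, then read a dump back for verification. A hypothetical sketch (the directory and file names here are invented; the real naming is decided by the debug layers):

    #include <armnnUtils/Filesystem.hpp>
    #include <iostream>
    #include <string>

    int main()
    {
        // Invented directory name, resolved under the system temp folder.
        std::string dir = armnnUtils::Filesystem::CreateDirectory("/ArmNNDebugOutput");

        // ... run a network optimised with m_DebugToFile = true ...

        // Invented file name; read one dumped tensor back as text.
        armnnUtils::Filesystem::FileContents dump =
            armnnUtils::Filesystem::ReadFileContentsIntoString(dir + "/layer0_output.txt");
        std::cout << dump << std::endl;
        return 0;
    }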