IVGCVSW-6513: Compilation failure in armnn-mobilenet-quant in ML-Examples

 * Move TContainer to armnnUtils library

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: I3c0f895d11b66f6ee224ac689a19d0477f990b98
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2def5fe..3d6f663 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -76,6 +76,7 @@
     include/armnnUtils/Filesystem.hpp
     include/armnnUtils/FloatingPointComparison.hpp
     include/armnnUtils/FloatingPointConverter.hpp
+    include/armnnUtils/TContainer.hpp
     include/armnnUtils/TensorUtils.hpp
     include/armnnUtils/Threads.hpp
     include/armnnUtils/Transpose.hpp
@@ -108,7 +109,6 @@
     src/armnnUtils/TensorUtils.cpp
     src/armnnUtils/Threads.cpp
     src/armnnUtils/Transpose.cpp
-    third-party/mapbox/variant.hpp
     )
 
 add_library_ex(armnnUtils STATIC ${armnnUtils_sources})
diff --git a/include/armnn/Utils.hpp b/include/armnn/Utils.hpp
index 533117c..7d442ba 100644
--- a/include/armnn/Utils.hpp
+++ b/include/armnn/Utils.hpp
@@ -4,9 +4,8 @@
 //
 #pragma once
 
-#include "armnn/TypesUtils.hpp"
+#include <armnn/TypesUtils.hpp>
 
-#include <mapbox/variant.hpp>
 #include <iostream>
 
 namespace armnn
@@ -42,9 +41,4 @@
 
 const std::string GetVersion();
 
-// Standard definition of TContainer used by ArmNN, use this definition or add alternative definitions here instead of
-// defining your own.
-using TContainer =
-        mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
-
 } // namespace armnn
diff --git a/include/armnnUtils/TContainer.hpp b/include/armnnUtils/TContainer.hpp
new file mode 100644
index 0000000..a55f9df
--- /dev/null
+++ b/include/armnnUtils/TContainer.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/TypesUtils.hpp>
+
+#include <mapbox/variant.hpp>
+
+namespace armnnUtils
+{
+
+// Standard definition of TContainer used by ArmNN, use this definition or add alternative definitions here instead of
+// defining your own.
+using TContainer =
+        mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
+
+} // namespace armnnUtils
diff --git a/src/armnn/test/ModelAccuracyCheckerTest.cpp b/src/armnn/test/ModelAccuracyCheckerTest.cpp
index 47ae3f4..59711a5 100644
--- a/src/armnn/test/ModelAccuracyCheckerTest.cpp
+++ b/src/armnn/test/ModelAccuracyCheckerTest.cpp
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: MIT
 //
 #include "ModelAccuracyChecker.hpp"
-#include <armnn/Utils.hpp>
+#include <armnnUtils/TContainer.hpp>
 
 #include <doctest/doctest.h>
 
@@ -60,12 +60,12 @@
 
     // Add image 1 and check accuracy
     std::vector<float> inferenceOutputVector1 = {0.05f, 0.10f, 0.70f, 0.15f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
-    armnn::TContainer inference1Container(inferenceOutputVector1);
-    std::vector<armnn::TContainer> outputTensor1;
+    armnnUtils::TContainer inference1Container(inferenceOutputVector1);
+    std::vector<armnnUtils::TContainer> outputTensor1;
     outputTensor1.push_back(inference1Container);
 
     std::string imageName = "val_01.JPEG";
-    checker.AddImageResult<armnn::TContainer>(imageName, outputTensor1);
+    checker.AddImageResult<armnnUtils::TContainer>(imageName, outputTensor1);
 
     // Top 1 Accuracy
     float totalAccuracy = checker.GetAccuracy(1);
@@ -73,12 +73,12 @@
 
     // Add image 2 and check accuracy
     std::vector<float> inferenceOutputVector2 = {0.10f, 0.0f, 0.0f, 0.0f, 0.05f, 0.70f, 0.0f, 0.0f, 0.0f, 0.15f};
-    armnn::TContainer inference2Container(inferenceOutputVector2);
-    std::vector<armnn::TContainer> outputTensor2;
+    armnnUtils::TContainer inference2Container(inferenceOutputVector2);
+    std::vector<armnnUtils::TContainer> outputTensor2;
     outputTensor2.push_back(inference2Container);
 
     imageName = "val_02.JPEG";
-    checker.AddImageResult<armnn::TContainer>(imageName, outputTensor2);
+    checker.AddImageResult<armnnUtils::TContainer>(imageName, outputTensor2);
 
     // Top 1 Accuracy
     totalAccuracy = checker.GetAccuracy(1);
@@ -90,12 +90,12 @@
 
     // Add image 3 and check accuracy
     std::vector<float> inferenceOutputVector3 = {0.0f, 0.10f, 0.0f, 0.0f, 0.05f, 0.70f, 0.0f, 0.0f, 0.0f, 0.15f};
-    armnn::TContainer inference3Container(inferenceOutputVector3);
-    std::vector<armnn::TContainer> outputTensor3;
+    armnnUtils::TContainer inference3Container(inferenceOutputVector3);
+    std::vector<armnnUtils::TContainer> outputTensor3;
     outputTensor3.push_back(inference3Container);
 
     imageName = "val_03.JPEG";
-    checker.AddImageResult<armnn::TContainer>(imageName, outputTensor3);
+    checker.AddImageResult<armnnUtils::TContainer>(imageName, outputTensor3);
 
     // Top 1 Accuracy
     totalAccuracy = checker.GetAccuracy(1);
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index db15872..dd3c0a3 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -9,8 +9,8 @@
 #include <AsyncExecutionCallback.hpp>
 
 #include <armnn/Logging.hpp>
-#include <armnn/Utils.hpp>
 #include <armnnUtils/Filesystem.hpp>
+#include <armnnUtils/TContainer.hpp>
 #include <InferenceTest.hpp>
 
 #if defined(ARMNN_SERIALIZER)
@@ -370,8 +370,8 @@
 {
     using namespace std::chrono;
 
-    std::vector<std::vector<armnn::TContainer>> inputs;
-    std::vector<std::vector<armnn::TContainer>> outputs;
+    std::vector<std::vector<armnnUtils::TContainer>> inputs;
+    std::vector<std::vector<armnnUtils::TContainer>> outputs;
 
     try
     {
@@ -436,7 +436,7 @@
 
         for(unsigned int j = 0; j < params.m_Iterations ; ++j)
         {
-            std::vector<armnn::TContainer> inputDataContainers;
+            std::vector<armnnUtils::TContainer> inputDataContainers;
             for(unsigned int i = 0; i < numInputs; ++i)
             {
                 // If there are less input files given than required for the execution of
@@ -460,7 +460,7 @@
                     numElements = params.m_InputTensorShapes[i]->GetNumElements();
                 }
 
-                armnn::TContainer tensorData;
+                armnnUtils::TContainer tensorData;
                 PopulateTensorWithData(tensorData,
                                        numElements,
                                        params.m_InputTypes[i],
@@ -476,7 +476,7 @@
 
         for (unsigned int j = 0; j < params.m_Iterations; ++j)
         {
-            std::vector <armnn::TContainer> outputDataContainers;
+            std::vector <armnnUtils::TContainer> outputDataContainers;
             for (unsigned int i = 0; i < numOutputs; ++i)
             {
                 if (params.m_OutputTypes[i].compare("float") == 0)
@@ -596,7 +596,7 @@
             {
                 ARMNN_LOG(info) << "Asynchronous execution with Arm NN thread pool...  \n";
                 armnn::AsyncCallbackManager callbackManager;
-                std::unordered_map<armnn::InferenceId, std::vector<armnn::TContainer>&> inferenceOutputMap;
+                std::unordered_map<armnn::InferenceId, std::vector<armnnUtils::TContainer>&> inferenceOutputMap;
 
                 // Declare the latest and earliest inference times here to be used when calculating overall time
                 std::chrono::high_resolution_clock::time_point earliestStartTime;
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
index a69a098..0f1cf6d 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
@@ -296,7 +296,7 @@
     const unsigned int batchSize = 1;
     const armnn::DataLayout outputLayout(cmdline.GetLayout());
 
-    std::vector<armnn::TContainer> imageDataContainers;
+    std::vector<armnnUtils::TContainer> imageDataContainers;
     const NormalizationParameters& normParams = GetNormalizationParameters(modelFormat, outputType);
     try
     {
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
index c668608..b9579e7 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
@@ -5,9 +5,9 @@
 
 #include "../InferenceTestImage.hpp"
 
-#include <armnn/Utils.hpp>
 #include <armnn/TypesUtils.hpp>
 
+#include <armnnUtils/TContainer.hpp>
 #include <armnnUtils/Permute.hpp>
 
 #include <algorithm>
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index e2cd5d9..13f7d74 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -7,13 +7,15 @@
 
 
 #include <armnn/ArmNN.hpp>
-#include <armnn/Utils.hpp>
 #include <armnn/Threadpool.hpp>
 #include <armnn/Logging.hpp>
 #include <armnn/utility/Timer.hpp>
 #include <armnn/BackendRegistry.hpp>
 #include <armnn/utility/Assert.hpp>
 #include <armnn/utility/NumericCast.hpp>
+
+#include <armnnUtils/TContainer.hpp>
+
 #include <common/include/ProfilingGuid.hpp>
 
 #if defined(ARMNN_SERIALIZER)
@@ -584,8 +586,8 @@
     }
 
     std::chrono::duration<double, std::milli> Run(
-            const std::vector<armnn::TContainer>& inputContainers,
-            std::vector<armnn::TContainer>& outputContainers)
+            const std::vector<armnnUtils::TContainer>& inputContainers,
+            std::vector<armnnUtils::TContainer>& outputContainers)
     {
         for (unsigned int i = 0; i < outputContainers.size(); ++i)
         {
@@ -633,8 +635,8 @@
 
     std::tuple<unsigned int, std::chrono::duration<double, std::milli>> RunAsync(
         armnn::experimental::IWorkingMemHandle& workingMemHandleRef,
-        const std::vector<armnn::TContainer>& inputContainers,
-        std::vector<armnn::TContainer>& outputContainers,
+        const std::vector<armnnUtils::TContainer>& inputContainers,
+        std::vector<armnnUtils::TContainer>& outputContainers,
         unsigned int inferenceID)
     {
         for (unsigned int i = 0; i < outputContainers.size(); ++i)
@@ -684,8 +686,8 @@
         }
     }
 
-    void RunAsync(const std::vector<armnn::TContainer>& inputContainers,
-                  std::vector<armnn::TContainer>& outputContainers,
+    void RunAsync(const std::vector<armnnUtils::TContainer>& inputContainers,
+                  std::vector<armnnUtils::TContainer>& outputContainers,
                   std::shared_ptr<armnn::IAsyncExecutionCallback> cb)
     {
         for (unsigned int i = 0; i < outputContainers.size(); ++i)
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index d0bb0c0..fb9c048 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -7,11 +7,12 @@
 #include "InferenceModel.hpp"
 
 #include <armnn/ArmNN.hpp>
-#include <armnn/Utils.hpp>
 #include <armnn/Logging.hpp>
 #include <armnn/TypesUtils.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
+#include <armnnUtils/TContainer.hpp>
+
 #include <cxxopts/cxxopts.hpp>
 #include <fmt/format.h>
 
@@ -114,7 +115,7 @@
 
     InferenceModelTestCase(TModel& model,
                            unsigned int testCaseId,
-                           const std::vector<armnn::TContainer>& inputs,
+                           const std::vector<armnnUtils::TContainer>& inputs,
                            const std::vector<unsigned int>& outputSizes)
         : m_Model(model)
         , m_TestCaseId(testCaseId)
@@ -137,13 +138,13 @@
 
 protected:
     unsigned int GetTestCaseId() const { return m_TestCaseId; }
-    const std::vector<armnn::TContainer>& GetOutputs() const { return m_Outputs; }
+    const std::vector<armnnUtils::TContainer>& GetOutputs() const { return m_Outputs; }
 
 private:
     TModel&                         m_Model;
     unsigned int                    m_TestCaseId;
-    std::vector<armnn::TContainer>  m_Inputs;
-    std::vector<armnn::TContainer>  m_Outputs;
+    std::vector<armnnUtils::TContainer>  m_Inputs;
+    std::vector<armnnUtils::TContainer>  m_Outputs;
 };
 
 template <typename TTestCaseDatabase, typename TModel>
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 91a90f3..b6087c5 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -7,6 +7,8 @@
 #include <armnn/Utils.hpp>
 #include <armnn/utility/Assert.hpp>
 #include <armnn/utility/NumericCast.hpp>
+#include <armnnUtils/TContainer.hpp>
+
 #include "CxxoptsUtils.hpp"
 
 #include <cxxopts/cxxopts.hpp>
@@ -38,7 +40,7 @@
     unsigned int label,
     std::vector<typename TModel::DataType> modelInput)
     : InferenceModelTestCase<TModel>(
-            model, testCaseId, std::vector<armnn::TContainer>{ modelInput }, { model.GetOutputSize() })
+            model, testCaseId, std::vector<armnnUtils::TContainer>{ modelInput }, { model.GetOutputSize() })
     , m_Label(label)
     , m_QuantizationParams(model.GetQuantizationParams())
     , m_NumInferencesRef(numInferencesRef)
diff --git a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
index d1d31f4..c08d88e 100644
--- a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
+++ b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
@@ -9,7 +9,7 @@
 #include "armnnDeserializer/IDeserializer.hpp"
 
 #include <armnnUtils/Filesystem.hpp>
-#include <armnn/Utils.hpp>
+#include <armnnUtils/TContainer.hpp>
 
 #include <cxxopts/cxxopts.hpp>
 #include <map>
@@ -325,8 +325,8 @@
                 const std::string imageName = imageEntry.first;
                 std::cout << "Processing image: " << imageName << "\n";
 
-                vector<armnn::TContainer> inputDataContainers;
-                vector<armnn::TContainer> outputDataContainers;
+                vector<armnnUtils::TContainer> inputDataContainers;
+                vector<armnnUtils::TContainer> outputDataContainers;
 
                 auto imagePath = pathToDataDir / fs::path(imageName);
                 switch (inputTensorDataType)
@@ -370,7 +370,7 @@
                     ARMNN_LOG(fatal) << "armnn::IRuntime: Failed to enqueue workload for image: " << imageName;
                 }
 
-                checker.AddImageResult<armnn::TContainer>(imageName, outputDataContainers);
+                checker.AddImageResult<armnnUtils::TContainer>(imageName, outputDataContainers);
             }
         }
         else
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 323e9fb..6c74aaa 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -228,7 +228,7 @@
     }
 }
 
-void PopulateTensorWithData(armnn::TContainer& tensorData,
+void PopulateTensorWithData(armnnUtils::TContainer& tensorData,
                             unsigned int numElements,
                             const std::string& dataTypeStr,
                             const armnn::Optional<QuantizationParams>& qParams,
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index d9e2459..bc2868a 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -9,7 +9,7 @@
 #include <armnn/Types.hpp>
 #include <armnn/Logging.hpp>
 #include <armnn/utility/StringUtils.hpp>
-#include <armnn/Utils.hpp>
+#include <armnnUtils/TContainer.hpp>
 
 #include <iostream>
 #include <fstream>
@@ -53,7 +53,7 @@
 
 using QuantizationParams = std::pair<float, int32_t>;
 
-void PopulateTensorWithData(armnn::TContainer& tensorData,
+void PopulateTensorWithData(armnnUtils::TContainer& tensorData,
                             unsigned int numElements,
                             const std::string& dataTypeStr,
                             const armnn::Optional<QuantizationParams>& qParams,