Revert "IVGCVSW-6359 Added support for Float16 (Half) to Execute Network"

This reverts commit 2d9956162dd002a41f7fb4fa6753195d33524c7f.

Reason for revert: After some discussion, we concluded that while this change does technically implement Float16 support for ExecuteNetwork, it does not do so in a way that matches most use cases and is likely to cause issues in the future. Reverting for now.

Change-Id: I4ce6de6879216e694631f5dc68e46fb793fae0a9
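
For context, the reverted change handled Float16 by converting each value through armnn::Half at the I/O boundary. The following standalone sketch (not part of this patch) illustrates that per-element round trip, assuming Half.hpp from armnnUtils is on the include path, as in the removed code below:

    // Sketch of the conversion the reverted change performed:
    // text -> float -> armnn::Half on input, Half -> float for printing on output.
    #include <Half.hpp>   // defines armnn::Half
    #include <cstdio>
    #include <string>
    #include <vector>

    int main()
    {
        std::vector<std::string> raw = { "0.5", "1.25", "-3.0" };

        // Input path, as the removed ParseDataArray<armnn::DataType::Float16> did.
        std::vector<armnn::Half> values;
        for (const std::string& s : raw)
        {
            values.push_back(armnn::Half(std::stof(s)));
        }

        // Output path, as the removed TensorPrinter overload did.
        for (armnn::Half v : values)
        {
            printf("%f ", static_cast<float>(v));
        }
        printf("\n");
        return 0;
    }
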
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 0d52711..a0a08d3 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -12,7 +12,6 @@
 #include <armnnUtils/Filesystem.hpp>
 #include <armnnUtils/TContainer.hpp>
 #include <InferenceTest.hpp>
-#include <Half.hpp>
 
 #if defined(ARMNN_SERIALIZER)
 #include "armnnDeserializer/IDeserializer.hpp"
@@ -485,7 +484,7 @@
             armnn::DataType type = model.GetOutputBindingInfo(outputIdx).second.GetDataType();
             switch (type)
             {
-                // --output-type only supports float, float16, int,  qasymms8 or qasymmu8.
+                // --output-type only supports float, int,  qasymms8 or qasymmu8.
                 case armnn::DataType::Float32:
                     if (params.m_OutputTypes[outputIdx].compare("float") != 0)
                     {
@@ -494,14 +493,6 @@
                                            ". This may cause unexpected problems or random failures.";
                     }
                     break;
-                case armnn::DataType::Float16:
-                    if (params.m_OutputTypes[outputIdx].compare("float16") != 0)
-                    {
-                        ARMNN_LOG(warning) << "Model output index: " << outputIdx << " has data type Float16. The " <<
-                                           "corresponding --output-type is " << params.m_OutputTypes[outputIdx] <<
-                                           ". This may cause unexpected problems or random failures.";
-                    }
-                    break;
                 case armnn::DataType::QAsymmU8:
                     if (params.m_OutputTypes[outputIdx].compare("qasymmu8") != 0)
                     {
@@ -539,10 +530,6 @@
                 {
                     outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
                 }
-                else if (params.m_OutputTypes[i].compare("float16") == 0)
-                {
-                    outputDataContainers.push_back(std::vector<armnn::Half>(model.GetOutputSize(i)));
-                }
                 else if (params.m_OutputTypes[i].compare("int") == 0)
                 {
                     outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 25dbe91..8ee66cf 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -294,13 +294,13 @@
                 ("y,input-type",
                  "The type of the input tensors in the network separated by comma. "
                  "If unset, defaults to \"float\" for all defined inputs. "
-                 "Accepted values (float, float16, int, qasymms8 or qasymmu8).",
+                 "Accepted values (float, int, qasymms8 or qasymmu8).",
                  cxxopts::value<std::string>())
 
                 ("z,output-type",
                  "The type of the output tensors in the network separated by comma. "
                  "If unset, defaults to \"float\" for all defined outputs. "
-                 "Accepted values (float, float16, int,  qasymms8 or qasymmu8).",
+                 "Accepted values (float, int,  qasymms8 or qasymmu8).",
                  cxxopts::value<std::string>())
 
                 ("T,tflite-executor",
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 94dbfe7..b6087c5 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -67,14 +67,6 @@
                                 });
     }
 
-    void operator()(const std::vector<armnn::Half>& values)
-    {
-        SortPredictions(values, [](armnn::Half value)
-                                {
-                                    return value;
-                                });
-    }
-
     void operator()(const std::vector<int8_t>& values)
     {
         SortPredictions(values, [](int8_t value)
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 00ed55c..6c74aaa 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -34,15 +34,6 @@
 }
 
 template<>
-auto ParseDataArray<armnn::DataType::Float16>(std::istream& stream)
-{
-    return ParseArrayImpl<armnn::Half>(stream, [](const std::string& s)
-    {
-        return armnn::Half(std::stof(s));
-    });
-}
-
-template<>
 auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
 {
     return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
@@ -148,20 +139,6 @@
     WriteToFile(values);
 }
 
-void TensorPrinter::operator()(const std::vector<armnn::Half>& values)
-{
-    if (m_PrintToConsole)
-    {
-        std::cout << m_OutputBinding << ": ";
-        ForEachValue(values, [](armnn::Half value)
-        {
-            printf("%f ", static_cast<float>(value));
-        });
-        printf("\n");
-    }
-    WriteToFile(values);
-}
-
 void TensorPrinter::operator()(const std::vector<uint8_t>& values)
 {
     if(m_DequantizeOutput)
@@ -284,12 +261,6 @@
                          GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
         }
     }
-    else if (dataTypeStr.compare("float16") == 0)
-    {
-        tensorData = readFromFile ?
-                     ParseDataArray<armnn::DataType::Float16>(inputTensorFile) :
-                     GenerateDummyTensorData<armnn::DataType::Float16>(numElements);
-    }
     else if (dataTypeStr.compare("int") == 0)
     {
         tensorData = readFromFile ?
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 8cd5c5b..bc2868a 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -36,8 +36,6 @@
 
     void operator()(const std::vector<int8_t>& values);
 
-    void operator()(const std::vector<armnn::Half>& values);
-
 private:
     template<typename Container, typename Delegate>
     void ForEachValue(const Container& c, Delegate delegate);