IVGCVSW-5625 Add support for Float16 to Delegate
* Float16 unit tests for Reshape
* Remove unsupported data type from Pad
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Ib1804bb6e708a0552fb40d05fe8a6511936f9793
diff --git a/delegate/src/test/TestUtils.cpp b/delegate/src/test/TestUtils.cpp
index 31c05a6..2787147 100644
--- a/delegate/src/test/TestUtils.cpp
+++ b/delegate/src/test/TestUtils.cpp
@@ -8,8 +8,6 @@
namespace armnnDelegate
{
-
-
void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize)
{
auto compareBool = [](auto a, auto b) {return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));};
@@ -63,4 +61,69 @@
}
}
+void CompareData(Half tensor1[], Half tensor2[], size_t tensorSize)
+{
+ for (size_t i = 0; i < tensorSize; i++)
+ {
+ CHECK(tensor1[i] == doctest::Approx( tensor2[i] ));
+ }
+}
+
+void CompareData(TfLiteFloat16 tensor1[], TfLiteFloat16 tensor2[], size_t tensorSize)
+{
+ for (size_t i = 0; i < tensorSize; i++)
+ {
+ CHECK(tensor1[i].data == tensor2[i].data);
+ }
+}
+
+void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize)
+{
+ for (size_t i = 0; i < tensorSize; i++)
+ {
+ CHECK(tensor1[i].data == half_float::detail::float2half<std::round_indeterminate, float>(tensor2[i]));
+ }
+}
+
+template <>
+void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
+ std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
+ std::vector<int32_t>& expectedOutputShape,
+ std::vector<Half>& expectedOutputValues,
+ unsigned int outputIndex)
+{
+ auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
+ auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
+ auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateOutputId);
+ auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
+ auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
+ auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<TfLiteFloat16>(armnnDelegateOutputId);
+
+ CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
+ CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
+
+ for (size_t i = 0; i < expectedOutputShape.size(); i++)
+ {
+ CHECK(armnnDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
+ CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
+ CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
+ }
+
+ armnnDelegate::CompareData(armnnDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
+ armnnDelegate::CompareData(tfLiteDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
+ armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+}
+
+template <>
+void FillInput<Half>(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues)
+{
+ auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
+    auto tfLiteDelegateInputData = interpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i].data = half_float::detail::float2half<std::round_indeterminate, float>(inputValues[i]);
+
+ }
+}
+
} // namespace armnnDelegate
\ No newline at end of file