Bug fixes for the refactor of ExecuteNetwork.
* Generate the dot file when -v is given. Previously it was only generated when using the delegate as the executor.
* Read the output name from m_Params.m_OutputNames instead of from m_TfLiteInterpreter.
* Fix typo: "delage" was used instead of "delegate".
* Template QAsymmS8 as int8 instead of uint8.
Change-Id: Ie13ae0f7e6395c0ebcb5ecda32e72082dee8aa6c
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Iac97a23927ba42290ebeb3446bbd36da15045e07
diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp
index dc495be..98b6c9d 100644
--- a/tests/ExecuteNetwork/TfliteExecutor.cpp
+++ b/tests/ExecuteNetwork/TfliteExecutor.cpp
@@ -144,7 +144,7 @@
outputSize *= outputDims->data[dim];
}
- std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": ";
+ std::cout << m_Params.m_OutputNames[outputIndex] << ": ";
results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation);
switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
@@ -152,38 +152,38 @@
case kTfLiteFloat32:
{
- auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+ auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
for (int i = 0; i < outputSize; ++i)
{
- fprintf(outputTensorFile, "%f ", tfLiteDelageOutputData[i]);
+ fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]);
}
break;
}
case kTfLiteInt32:
{
- auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
+ auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
for (int i = 0; i < outputSize; ++i)
{
- fprintf(outputTensorFile, "%d ", tfLiteDelageOutputData[i]);
+ fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
}
break;
}
case kTfLiteUInt8:
{
- auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
+ auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
for (int i = 0; i < outputSize; ++i)
{
- fprintf(outputTensorFile, "%u ", tfLiteDelageOutputData[i]);
+ fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
}
break;
}
case kTfLiteInt8:
{
- auto tfLiteDelageOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
+ auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
for (int i = 0; i < outputSize; ++i)
{
- fprintf(outputTensorFile, "%d ", tfLiteDelageOutputData[i]);
+ fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
}
break;
}