IVGCVSW-6733 Add missing qasymms8 output type to delegate

 * Accept qasymms8 output tensors alongside qsymms8 in ExecuteNetwork
 * Fold the duplicated qasymms8 input branch into the existing qsymms8 case

Signed-off-by: Finn Williams <finn.williams@arm.com>
Change-Id: Ic5ebf7b80468b7751c234c43a90ec4cbf4c59ffe
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 085721c..f321a26 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -154,7 +154,8 @@
 
             std::copy(tensorData.begin(), tensorData.end(), inputData);
         }
-        else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0)
+        else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0 ||
+                 params.m_InputTypes[inputIndex].compare("qasymms8") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
 
@@ -215,26 +216,6 @@
 
             std::copy(tensorData.begin(), tensorData.end(), inputData);
         }
-        else if (params.m_InputTypes[inputIndex].compare("qasymms8") == 0)
-        {
-            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
-
-            if(inputData == NULL)
-            {
-                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
-                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
-                return EXIT_FAILURE;
-            }
-
-            std::vector<int8_t> tensorData;
-            PopulateTensorWithDataGeneric<int8_t>(tensorData,
-                                                  inputSize,
-                                                  dataFile,
-                                                  [](const std::string& s)
-                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
-
-            std::copy(tensorData.begin(), tensorData.end(), inputData);
-        }
         else
         {
             ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
@@ -339,7 +320,8 @@
                     }
                 }
             }
-            else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0)
+            else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0 ||
+                     params.m_OutputTypes[outputIndex].compare("qasymms8") == 0)
             {
                 auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                 if(tfLiteDelageOutputData == NULL)