IVGCVSW-5377 'Add ArmNN TfLite delegate to ExecuteNetwork'

* Assign the correct input values to the model
* Call the correct Validate function for the Mul and Sub operators
* Return the correct data type for kTfLiteInt8 (see the sketch below)
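
A condensed sketch of the new kTfLiteInt8 handling (the helper name
MapInt8ToArmnnType is illustrative only; the authoritative change is the
DelegateUtils.hpp hunk below):

    // Per-tensor affine quantization carries exactly one zero point and is
    // asymmetric, so it maps to QAsymmS8; per-channel quantization is
    // symmetric and maps to QSymmS8.
    armnn::DataType MapInt8ToArmnnType(const TfLiteTensor& tensor)
    {
        if (tensor.quantization.type == kTfLiteAffineQuantization)
        {
            auto* quantization =
                reinterpret_cast<TfLiteAffineQuantization*>(tensor.quantization.params);
            return (quantization->zero_point != nullptr &&
                    quantization->zero_point->size == 1)
                       ? armnn::DataType::QAsymmS8
                       : armnn::DataType::QSymmS8;
        }
        return armnn::DataType::QAsymmS8;
    }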

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I6d23adf68d33d8be9a1fbf5d19dfe47939a6d3d6
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 0537ba9..fad07ff 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -342,14 +342,28 @@
         case kTfLiteUInt8:
             return armnn::DataType::QAsymmU8;
         case kTfLiteInt8:
-            if (tfLiteTensor.params.zero_point == 0)
+        {
+            auto quantizationInfo = tfLiteTensor.quantization;
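+            // A single zero point indicates per-tensor (asymmetric) quantization;
+            // per-channel (symmetric) quantization carries one zero point per channel.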
+            if (quantizationInfo.type == kTfLiteAffineQuantization)
             {
-                return armnn::DataType::QSymmS8;
+                auto* quantization =
+                    reinterpret_cast<TfLiteAffineQuantization*>(quantizationInfo.params);
+                if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
+                {
+                    return armnn::DataType::QAsymmS8;
+                }
+                else
+                {
+                    return armnn::DataType::QSymmS8;
+                }
             }
             else
             {
                 return armnn::DataType::QAsymmS8;
             }
+        }
         case kTfLiteInt16:
             return armnn::DataType::QSymmS16;
         case kTfLiteInt32:
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index e527005..49a5dfb 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -228,13 +228,13 @@
                                                inputTensorInfo1,
                                                outputTensorInfo);
             case kTfLiteBuiltinMul:
-                return ValidateDivOperator(delegateData,
+                return ValidateMulOperator(delegateData,
                                            tfLiteContext,
                                            inputTensorInfo0,
                                            inputTensorInfo1,
                                            outputTensorInfo);
             case kTfLiteBuiltinSub:
-                return ValidateDivOperator(delegateData,
+                return ValidateSubOperator(delegateData,
                                            tfLiteContext,
                                            inputTensorInfo0,
                                            inputTensorInfo1,
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index fa84a6e..ba7ce29 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -77,6 +77,15 @@
     for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
     {
         int input = tfLiteInterpreter->inputs()[inputIndex];
+        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;
+
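+        // Total number of elements in the input tensor.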
+        long inputSize = 1;
+        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
+        {
+            inputSize *= inputDims->data[dim];
+        }
+
         if (params.m_InputTypes[inputIndex].compare("float") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
@@ -86,8 +94,16 @@
                                    params.m_InputTypes[inputIndex],
                                    armnn::EmptyOptional(),
                                    dataFile);
-            inputData = reinterpret_cast<float*>(&tensorData);
-            armnn::IgnoreUnused(inputData);
+
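+            // Copy the loaded data into the interpreter's input tensor.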
+            mapbox::util::apply_visitor([&](auto&& value)
+            {
+                for (long i = 0; i < inputSize; ++i)
+                {
+                    inputData[i] = value.data()[i];
+                }
+            },
+            tensorData);
         }
         else if (params.m_InputTypes[inputIndex].compare("int") == 0)
         {
@@ -98,8 +113,14 @@
                                    params.m_InputTypes[inputIndex],
                                    armnn::EmptyOptional(),
                                    dataFile);
-            inputData = reinterpret_cast<int32_t*>(&tensorData);
-            armnn::IgnoreUnused(inputData);
+            mapbox::util::apply_visitor([&](auto&& value)
+            {
+                for (long i = 0; i < inputSize; ++i)
+                {
+                    inputData[i] = value.data()[i];
+                }
+            },
+            tensorData);
         }
         else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
         {
@@ -110,8 +131,14 @@
                                    params.m_InputTypes[inputIndex],
                                    armnn::EmptyOptional(),
                                    dataFile);
-            inputData = reinterpret_cast<uint8_t*>(&tensorData);
-            armnn::IgnoreUnused(inputData);
+            mapbox::util::apply_visitor([&](auto&& value)
+            {
+                for (long i = 0; i < inputSize; ++i)
+                {
+                    inputData[i] = value.data()[i];
+                }
+            },
+            tensorData);
         }
         else
         {
@@ -128,21 +155,19 @@
         // Print out the output
         for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
         {
-            std::cout << "Printing out the output" << std::endl;
             auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
-            TfLiteIntArray *outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
+            TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
 
-            int outputSize = 1;
+            long outputSize = 1;
             for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
             {
                 outputSize *= outputDims->data[dim];
             }
 
             std::cout << params.m_OutputNames[outputIndex] << ": ";
             if (params.m_OutputTypes[outputIndex].compare("float") == 0)
             {
                 auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-
                 if(tfLiteDelageOutputData == NULL)
                 {
                     ARMNN_LOG(fatal) << "Output tensor is null, output type: "
@@ -162,7 +187,6 @@
             else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
             {
                 auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
-
                 if(tfLiteDelageOutputData == NULL)
                 {
                     ARMNN_LOG(fatal) << "Output tensor is null, output type: "
@@ -182,7 +206,6 @@
             else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
             {
                 auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
-
                 if(tfLiteDelageOutputData == NULL)
                 {
                     ARMNN_LOG(fatal) << "Output tensor is null, output type: "