IVGCVSW-5560 Fix TfLiteDelegate Reshape operator failure

 * Fixed issue when running certain models with 2D shape tensor.
 * Falls back to the builtin reshape options if a non-1D shape tensor is encountered.
 * Fixed ExecuteNetwork so that an error message is logged if the input tensor data is NULL.
 * Updated TfLiteDelegate docs to include Logical Operators.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Signed-off-by: David Monahan <david.monahan@arm.com>
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I5dbaf30351f7fc86e6178a0caf46c152812088d3
diff --git a/delegate/TensorFlowLiteDelegateSupport.md b/delegate/TensorFlowLiteDelegateSupport.md
index d94f14e..c334018 100644
--- a/delegate/TensorFlowLiteDelegateSupport.md
+++ b/delegate/TensorFlowLiteDelegateSupport.md
@@ -38,6 +38,12 @@
 
 * LESS_OR_EQUAL
 
+* LOGICAL_AND
+
+* LOGICAL_NOT
+
+* LOGICAL_OR
+
 * LOGISTIC
 
 * LOG_SOFTMAX
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
index e880383..5e130b2 100644
--- a/delegate/src/Redefine.hpp
+++ b/delegate/src/Redefine.hpp
@@ -19,8 +19,8 @@
 {
 
 TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
-                                           const std::vector<int32_t>& targetShape,
-                                           armnn::ReshapeDescriptor& reshapeDesc)
+                                     const std::vector<int32_t>& targetShape,
+                                     armnn::ReshapeDescriptor& reshapeDesc)
 {
     std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
     const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);
@@ -67,22 +67,14 @@
 
     const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
     const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
-    if (IsDynamicTensor(tfLiteInputTensor0))
+    if (!IsValid(tfLiteContext, tfLiteInputTensor0, operatorCode, nodeIndex))
     {
-        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
-                                 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
-                                 "operator #%d node #%d: ",
-                                 operatorCode, nodeIndex);
         return kTfLiteError;
     }
 
     const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
-    if (IsDynamicTensor(tfLiteOutputTensor))
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
     {
-        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
-                                 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in "
-                                 "operator #%d node #%d: ",
-                                 operatorCode, nodeIndex);
         return kTfLiteError;
     }
 
@@ -91,18 +83,15 @@
 
     armnn::ReshapeDescriptor reshapeDesc;
     std::vector<int32_t> targetShape;
+    bool shapeSet = false;
 
     // The new shape can be defined by either a second input tensor or by a builtin option, we need to check for both.
     if (numInputs == 2)
     {
         // Get shape from the second input tensor
         const TfLiteTensor& tfLiteShapeInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
-        if (IsDynamicTensor(tfLiteShapeInputTensor))
+        if (!IsValid(tfLiteContext, tfLiteShapeInputTensor, operatorCode, nodeIndex))
         {
-            TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
-                                     "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
-                                     "operator #%d node #%d: ",
-                                     operatorCode, nodeIndex);
             return kTfLiteError;
         }
 
@@ -110,20 +99,22 @@
         {
             TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                      "TfLiteArmnnDelegate: Target 'shape' input is not a 1D tensor in "
-                                     "operator #%d node #%d: ",
+                                     "operator #%d node #%d: Falling back to TfLiteOptions.",
                                      operatorCode, nodeIndex);
-            return kTfLiteError;
         }
-
-        // Get the shape data out of the input tensor
-        auto* shapeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteShapeInputTensor);
-        auto shapeTensorNumValues = tfLiteShapeInputTensor.dims->data[0];
-        for (auto i=0; i < shapeTensorNumValues; ++i)
+        else
         {
-            targetShape.push_back(*(shapeTensorDataPtr+i));
+            // Get the shape data out of the input tensor
+            auto* shapeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteShapeInputTensor);
+            auto shapeTensorNumValues = tfLiteShapeInputTensor.dims->data[0];
+            for (auto i=0; i < shapeTensorNumValues; ++i)
+            {
+                targetShape.push_back(*(shapeTensorDataPtr+i));
+            }
+            shapeSet = true;
         }
     }
-    else
+    if (!shapeSet)
     {
         // Get shape from the builtin data
         TfLiteReshapeParams* reshapeOptions = reinterpret_cast<TfLiteReshapeParams*>(tfLiteNode->builtin_data);
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 00507e0..6d60eaf 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -89,7 +89,7 @@
         {
             auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
 
-            if(tfLiteInterpreter == NULL)
+            if(inputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                     "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
@@ -109,7 +109,7 @@
         {
             auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
 
-            if(tfLiteInterpreter == NULL)
+            if(inputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                     "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
@@ -129,7 +129,7 @@
         {
             auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);
 
-            if(tfLiteInterpreter == NULL)
+            if(inputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                     "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
@@ -149,7 +149,7 @@
         {
             auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
 
-            if(tfLiteInterpreter == NULL)
+            if(inputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                     "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";