MLCE-1138: Fix Delegate support for FP16 models

 * Fixed issue where backends were asked to support FP16 layers that would
   be optimized out.
 * Fixed issue where backends were asked to support non-constant filter
   and bias tensors when those tensors would be replaced by constant
   tensors during optimization.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ib54b9cb99d5014e27172841a665daf57d1d5b23d
diff --git a/delegate/classic/src/Convolution.hpp b/delegate/classic/src/Convolution.hpp
index cf0134e..71ecd4c 100644
--- a/delegate/classic/src/Convolution.hpp
+++ b/delegate/classic/src/Convolution.hpp
@@ -56,7 +56,6 @@
     {
         return kTfLiteError;
     }
-
     const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
     if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
     {
@@ -86,6 +85,7 @@
     if(biasEnabled)
     {
         const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+
         if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
         {
             return kTfLiteError;
@@ -115,6 +115,27 @@
     armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
+        bool filterIsConst = filterTensorInfo.IsConstant();
+
+        if (!filterIsConst)
+        {
+            filterIsConst = WillInputBeOptimizedToConst(tfLiteContext, tfLiteNode->inputs->data[1]);
+        }
+        armnn::TensorInfo filterTensorInfoCopy(filterTensorInfo);
+        filterTensorInfoCopy.SetConstant(filterIsConst);
+        armnn::Optional<armnn::TensorInfo> optionalBiasInfoCopy(biasTensorInfo);
+
+        if (biasEnabled)
+        {
+            bool biasIsConst = biasTensorInfo.IsConstant();
+
+            if (!biasIsConst)
+            {
+                biasIsConst = WillInputBeOptimizedToConst(tfLiteContext, tfLiteNode->inputs->data[2]);
+            }
+            optionalBiasInfoCopy.value().SetConstant(biasIsConst);
+        }
+
         bool isSupported = false;
         FORWARD_LAYER_SUPPORT_FUNC("CONV2D",
                                    tfLiteContext,
@@ -125,8 +146,8 @@
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor,
-                                   filterTensorInfo,
-                                   optionalBiasInfo);
+                                   filterTensorInfoCopy,
+                                   optionalBiasInfoCopy);
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
@@ -480,6 +501,28 @@
     armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
+        bool filterIsConst = filterTensorInfo.IsConstant();
+
+        if (!filterIsConst)
+        {
+            filterIsConst = WillInputBeOptimizedToConst(tfLiteContext, tfLiteNode->inputs->data[1]);
+        }
+        armnn::TensorInfo filterTensorInfoCopy(filterTensorInfo);
+        filterTensorInfoCopy.SetConstant(filterIsConst);
+
+        armnn::Optional<armnn::TensorInfo> optionalBiasInfoCopy(biasTensorInfo);
+
+        if (biasEnabled)
+        {
+            bool biasIsConst = biasTensorInfo.IsConstant();
+
+            if (!biasIsConst)
+            {
+                biasIsConst = WillInputBeOptimizedToConst(tfLiteContext, tfLiteNode->inputs->data[2]);
+            }
+            optionalBiasInfoCopy.value().SetConstant(biasIsConst);
+        }
+
         bool isSupported = false;
         FORWARD_LAYER_SUPPORT_FUNC("DEPTHWISE_CONV2D",
                                    tfLiteContext,
@@ -490,8 +533,8 @@
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor,
-                                   filterTensorInfo,
-                                   armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
+                                   filterTensorInfoCopy,
+                                   optionalBiasInfoCopy);
         return isSupported ? kTfLiteOk : kTfLiteError;
     }