IVGCVSW-7501 Allow constant tensors as inputs for input data in the delegate

 In the TLCT tests we were failing many tests because they used
 constant tensors as data input for the layers. We had the functionality
 in place but we didn't have it spread across the visit functions.

 * Check if inputs are constant tensors and attempt to assign them
   to input slot of layers.
 * Add missing checks to some functions that return a kTfLiteStatus
   so we can see if they fail
 * Clean up CreateConstTensor function

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: I8610b770aea56932a98f91c961d59b3de47c2ab5
diff --git a/delegate/src/Activation.hpp b/delegate/src/Activation.hpp
index 3560bfd..59066d2 100644
--- a/delegate/src/Activation.hpp
+++ b/delegate/src/Activation.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -120,6 +120,12 @@
     armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(activationLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(activationLayer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/ArgMinMax.hpp b/delegate/src/ArgMinMax.hpp
index dd28807..4e4a2a3 100644
--- a/delegate/src/ArgMinMax.hpp
+++ b/delegate/src/ArgMinMax.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -119,6 +119,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/BatchMatMul.hpp b/delegate/src/BatchMatMul.hpp
index 3b884a0..49fba05 100644
--- a/delegate/src/BatchMatMul.hpp
+++ b/delegate/src/BatchMatMul.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -95,8 +95,13 @@
 
         armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
         outputSlot.SetTensorInfo(outputTensorInfo);
-        Connect(layer, tfLiteNode, delegateData);
 
-        return kTfLiteOk;
+        // try to connect the Constant Inputs if there are any
+        if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+        {
+            return kTfLiteError;
+        }
+
+       return Connect(layer, tfLiteNode, delegateData);
     }
 } // namespace armnnDelegate
diff --git a/delegate/src/BatchSpace.hpp b/delegate/src/BatchSpace.hpp
index 903fe37..30c6dbf 100644
--- a/delegate/src/BatchSpace.hpp
+++ b/delegate/src/BatchSpace.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -103,6 +103,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
@@ -197,6 +203,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
index ee121e3..80354e8 100644
--- a/delegate/src/Comparison.hpp
+++ b/delegate/src/Comparison.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -116,6 +116,12 @@
     armnn::IOutputSlot& outputSlot = comparisonLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(comparisonLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
                                         inputTensorInfo1,
                                         comparisonLayer,
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index 2f83d2a..17f23d8 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -303,6 +303,13 @@
 
     armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(meanLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     return Connect(meanLayer, tfLiteNode, delegateData);
 }
 
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index 1718902..a8559e2 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -115,7 +115,7 @@
 
     }
 
-    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
 
     armnn::TensorInfo biasTensorInfo;
     if(biasEnabled)
@@ -181,12 +181,11 @@
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
     layer->SetBackendId(setBackend);
 
-    if(tflite::IsConstantTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]]))
+    if(filterTensorInfo.IsConstant())
     {
         auto filter =
                 CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]],
-                                  filterTensorInfo,
-                                  armnn::Optional<armnn::PermutationVector &>());
+                                  filterTensorInfo);
 
         armnn::IConnectableLayer *weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
         weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
@@ -196,7 +195,7 @@
     if (biasEnabled)
     {
         const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
-        if(tflite::IsConstantTensor(&tfLiteBiasTensor))
+        if(biasTensorInfo.IsConstant())
         {
             auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
             armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
@@ -206,6 +205,18 @@
         }
     }
 
+    // The data input can also be constant, so we must check that this is also allocated to an input slot
+    if(inputTensorInfo.IsConstant())
+    {
+        auto input =
+                CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
+                                  inputTensorInfo);
+
+        armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    }
+
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -290,7 +301,7 @@
 
     }
 
-    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
 
     armnn::TensorInfo biasTensorInfo;
     if(biasEnabled)
@@ -354,11 +365,10 @@
 
     // Add a constant layer for weights and biases if inputs are constant,
     // which are connected to the Convolution3d layer as inputs.
-    if (tflite::IsConstantTensor(&tfLiteFilterTensor))
+    if (filterTensorInfo.IsConstant())
     {
         auto filter = CreateConstTensor(&tfLiteFilterTensor,
-                                        filterTensorInfo,
-                                        armnn::Optional<armnn::PermutationVector&>());
+                                        filterTensorInfo);
 
         armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
         ARMNN_ASSERT(weightsLayer != nullptr);
@@ -370,11 +380,10 @@
     if(biasEnabled)
     {
         const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
-        if(tflite::IsConstantTensor(&tfLiteBiasTensor))
+        if(biasTensorInfo.IsConstant())
         {
             auto biases = CreateConstTensor(&tfLiteBiasTensor,
-                                            biasTensorInfo,
-                                            armnn::Optional<armnn::PermutationVector&>());
+                                            biasTensorInfo);
 
             armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
             ARMNN_ASSERT(biasLayer != nullptr);
@@ -384,6 +393,18 @@
         }
     }
 
+    // The data input can also be constant, so we must check that this is also allocated to an input slot
+    if(inputTensorInfo.IsConstant())
+    {
+        auto input =
+                CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
+                                  inputTensorInfo);
+
+        armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    }
+
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
@@ -499,7 +520,7 @@
 
     }
 
-    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
 
     // Assuming input is NHWC
     unsigned int inputHeight = inputTensorInfo.GetShape()[1];
@@ -563,7 +584,7 @@
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
     layer->SetBackendId(setBackend);
 
-    if(tflite::IsConstantTensor(&tfLiteFilterTensor))
+    if(filterTensorInfo.IsConstant())
     {
         // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
         auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
@@ -576,7 +597,7 @@
     if (biasEnabled)
     {
         const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
-        if(tflite::IsConstantTensor(&tfLiteBiasTensor))
+        if(biasTensorInfo.IsConstant())
         {
             auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
             armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
@@ -586,6 +607,18 @@
         }
     }
 
+    // The data input can also be constant, so we must check that this is also allocated to an input slot
+    if(inputTensorInfo.IsConstant())
+    {
+        auto input =
+                CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
+                                  inputTensorInfo);
+
+        armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    }
+
     ARMNN_ASSERT(layer != nullptr);
 
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -636,19 +669,19 @@
         return kTfLiteError;
     }
 
-    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
-    std::vector<int32_t> outputShape(tensorInfo.GetNumElements());
-    if (tensorInfo.GetDataType() == armnn::DataType::Signed32)
+    const armnn::TensorInfo outputShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
+    std::vector<int32_t> outputShape(outputShapeTensorInfo.GetNumElements());
+    if (outputShapeTensorInfo.GetDataType() == armnn::DataType::Signed32)
     {
-        for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
+        for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); i++)
         {
             outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
         }
     }
 
-    if (tensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
+    if (outputShapeTensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
     {
-        for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
+        for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); i++)
         {
             outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
         }
@@ -716,7 +749,7 @@
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
-    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
+    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
 
     // TfLite uses NHWC tensors
     const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
@@ -743,8 +776,7 @@
 
     // Set up filter
     auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
-                                          filterTensorInfo,
-                                          armnn::Optional<armnn::PermutationVector&>());
+                                          filterTensorInfo);
     armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
@@ -769,6 +801,18 @@
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
+    // The data input can be constant, so we must check that this is allocated to an input slot
+    if(inputTensorInfo.IsConstant())
+    {
+        auto input =
+                CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]],
+                                  inputTensorInfo);
+
+        armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    }
+
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index c0bef4f..3e74225 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -545,10 +545,7 @@
 }
 
 armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
-                                     armnn::TensorInfo& tensorInfo,
-                                     armnn::Optional<armnn::PermutationVector&>
-                                             permutationVector = armnn::EmptyOptional(),
-                                     void* permutationData = nullptr)
+                                     const armnn::TensorInfo& tensorInfo)
 {
     if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
     {
@@ -556,28 +553,7 @@
             "TfLiteArmnnDelegate:  Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
     }
 
-    if(tflite::IsConstantTensor(tfLiteTensor))
-    {
-        tensorInfo.SetConstant();
-    }
-
-    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
-    {
-        // Permute tensor info
-        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
-        // then permute data using the shape from permuted tensor info
-        armnnUtils::Permute(tensorInfo.GetShape(),
-                            permutationVector.value(),
-                            tfLiteTensor->data.data,
-                            permutationData,
-                            armnn::GetDataTypeSize(tensorInfo.GetDataType()));
-
-        return armnn::ConstTensor(tensorInfo, permutationData);
-    }
-    else
-    {
-        return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
-    }
+    return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
 }
 
 armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
@@ -611,7 +587,7 @@
 }
 
 TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
-                             armnn::TensorInfo& constTensorInfo,
+                             const armnn::TensorInfo& constTensorInfo,
                              TfLiteContext* tfLiteContext,
                              const TfLiteTensor& tfLiteTensor,
                              armnnDelegate::DelegateData& data,
@@ -633,8 +609,7 @@
     }
 
     auto constantInput = CreateConstTensor(&tfLiteTensor,
-                                           constTensorInfo,
-                                           armnn::Optional<armnn::PermutationVector&>());
+                                           constTensorInfo);
     armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
     constantLayer->SetBackendId(setBackend);
     armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
@@ -684,8 +659,7 @@
                 return kTfLiteError;
             }
             auto constantInput = CreateConstTensor(&tfLiteInputTensor,
-                                                   inputTensorInfo,
-                                                   armnn::Optional<armnn::PermutationVector&>());
+                                                   inputTensorInfo);
             armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
             constantLayer->SetBackendId(setBackend);
             armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
diff --git a/delegate/src/ElementwiseUnary.hpp b/delegate/src/ElementwiseUnary.hpp
index 947e531..4be6fba 100644
--- a/delegate/src/ElementwiseUnary.hpp
+++ b/delegate/src/ElementwiseUnary.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -78,6 +78,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index 337f115..1129951 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -54,7 +54,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    armnn::TensorInfo weightsTensorInfo       = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor);
+    const armnn::TensorInfo& weightsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor);
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     // Check that we support fused activation before we attempt to create a layer
@@ -82,8 +82,6 @@
         return kTfLiteError;
     }
 
-    bool isConstantWeights = tflite::IsConstantTensor(&tfLiteWeightsTensor);
-
     armnn::TensorInfo biasTensorInfo;
     if (biasEnabled)
     {
@@ -141,7 +139,7 @@
     armnn::FullyConnectedDescriptor descriptor;
     descriptor.m_TransposeWeightMatrix = true;
     descriptor.m_BiasEnabled           = biasEnabled;
-    descriptor.m_ConstantWeights       = isConstantWeights;
+    descriptor.m_ConstantWeights       = weightsTensorInfo.IsConstant();
 
     bool isSupported = false;
     armnn::BackendId setBackend;
@@ -172,11 +170,10 @@
     ARMNN_ASSERT(layer != nullptr);
 
     // Add a constant layer for weights and biases if inputs are constant.
-    if (isConstantWeights)
+    if (weightsTensorInfo.IsConstant())
     {
         auto weightsTensor = CreateConstTensor(&tfLiteWeightsTensor,
-                                               weightsTensorInfo,
-                                               armnn::Optional<armnn::PermutationVector&>());
+                                               weightsTensorInfo);
 
         armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(weightsTensor);
 
@@ -187,11 +184,10 @@
     if (biasEnabled)
     {
         const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
-        if(tflite::IsConstantTensor(&tfLiteBiasTensor))
+        if(biasTensorInfo.IsConstant())
         {
             auto biasTensor = CreateConstTensor(&tfLiteBiasTensor,
-                                                biasTensorInfo,
-                                                armnn::Optional<armnn::PermutationVector&>());
+                                                biasTensorInfo);
 
             armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
             ARMNN_ASSERT(biasLayer != nullptr);
@@ -201,6 +197,18 @@
         }
     }
 
+    // The data input can also be constant, so we must check that this is also allocated to an input slot
+    if(inputTensorInfo.IsConstant())
+    {
+        auto input =
+                CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
+                                  inputTensorInfo);
+
+        armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    }
+
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
@@ -224,7 +232,7 @@
             delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(layer->GetInputSlot(1));
         }
 
-        if (biasEnabled && !tflite::IsConstantTensor(&tfLiteTensors[tfLiteNode->inputs->data[2]]))
+        if (biasEnabled && !biasTensorInfo.IsConstant())
         {
             delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[2]]->Connect(layer->GetInputSlot(2));
         }
@@ -233,7 +241,10 @@
 
     if (reshapeLayer == nullptr)
     {
-        Connect(layer, tfLiteNode, delegateData);
+        if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+        {
+            return kTfLiteError;
+        }
     }
     
     if (outputTensorInfo.GetNumDimensions() > 2)
diff --git a/delegate/src/Normalization.hpp b/delegate/src/Normalization.hpp
index d0db43e..ef2e524 100644
--- a/delegate/src/Normalization.hpp
+++ b/delegate/src/Normalization.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -70,6 +70,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
@@ -143,6 +149,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
index 4dc8e0d..1178b6d 100644
--- a/delegate/src/Pooling.hpp
+++ b/delegate/src/Pooling.hpp
@@ -123,7 +123,17 @@
 
     armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
-    Connect(poolingLayer, tfLiteNode, delegateData);
+
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(poolingLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
+    if(Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
 
     // Check and create activation
     return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
@@ -299,7 +309,17 @@
     // Create and set output slots
     armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
-    Connect(poolingLayer, tfLiteNode, delegateData);
+
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(poolingLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
+    if(Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
 
     return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
 }
diff --git a/delegate/src/Quantization.hpp b/delegate/src/Quantization.hpp
index 64f57de..f119296 100644
--- a/delegate/src/Quantization.hpp
+++ b/delegate/src/Quantization.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -159,6 +159,12 @@
     armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(quantizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     return Connect(quantizeLayer, tfLiteNode, delegateData);
 }
 
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
index 8f9a4e4..864fb7a 100644
--- a/delegate/src/Redefine.hpp
+++ b/delegate/src/Redefine.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -74,6 +74,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
@@ -240,6 +246,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/Reduce.hpp b/delegate/src/Reduce.hpp
index 3f4c118..2d8b462 100644
--- a/delegate/src/Reduce.hpp
+++ b/delegate/src/Reduce.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -133,6 +133,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/Resize.hpp b/delegate/src/Resize.hpp
index 0cb15d3..370f1ab 100644
--- a/delegate/src/Resize.hpp
+++ b/delegate/src/Resize.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -191,6 +191,12 @@
     armnn::IOutputSlot& outputSlot = resizeLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(resizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     ARMNN_ASSERT(resizeLayer != nullptr);
 
     return Connect(resizeLayer, tfLiteNode, delegateData);
diff --git a/delegate/src/Round.hpp b/delegate/src/Round.hpp
index b920bd5..7a060b1 100644
--- a/delegate/src/Round.hpp
+++ b/delegate/src/Round.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -58,6 +58,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/Shape.hpp b/delegate/src/Shape.hpp
index 625e6a8..d797563 100644
--- a/delegate/src/Shape.hpp
+++ b/delegate/src/Shape.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -82,6 +82,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/Slice.hpp b/delegate/src/Slice.hpp
index d5712ae..f19e332 100644
--- a/delegate/src/Slice.hpp
+++ b/delegate/src/Slice.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -127,6 +127,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp
index 738f542..31c6ac3 100644
--- a/delegate/src/Softmax.hpp
+++ b/delegate/src/Softmax.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -142,6 +142,12 @@
     armnn::IOutputSlot& outputSlot = softmaxLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(softmaxLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(softmaxLayer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/SpaceDepth.hpp b/delegate/src/SpaceDepth.hpp
index 2172d86..cc7f034 100644
--- a/delegate/src/SpaceDepth.hpp
+++ b/delegate/src/SpaceDepth.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -68,6 +68,12 @@
     layer->SetBackendId(setBackend);
     ARMNN_ASSERT(layer != nullptr);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
@@ -133,6 +139,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/Split.hpp b/delegate/src/Split.hpp
index 5c094b4..b183b55 100644
--- a/delegate/src/Split.hpp
+++ b/delegate/src/Split.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -334,6 +334,12 @@
         layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
     }
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/StridedSlice.hpp b/delegate/src/StridedSlice.hpp
index d2c4d5d..998e3d3 100644
--- a/delegate/src/StridedSlice.hpp
+++ b/delegate/src/StridedSlice.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -142,6 +142,12 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     // Connect
     return Connect(layer, tfLiteNode, delegateData);
 }
diff --git a/delegate/src/Transpose.hpp b/delegate/src/Transpose.hpp
index 15c5310..41178d0 100644
--- a/delegate/src/Transpose.hpp
+++ b/delegate/src/Transpose.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -99,6 +99,12 @@
     armnn::IOutputSlot& outputSlot = transposeLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    // try to connect the Constant Inputs if there are any
+    if(ProcessInputs(transposeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+    {
+        return kTfLiteError;
+    }
+
     return Connect(transposeLayer, tfLiteNode, delegateData);
 }
 } // namespace armnnDelegate