IVGCVSW-7154 'Constant Tensors As Inputs'

* Fixed issues when converting Conv2D and DepthwiseConv2d inputs whose weights are not constant tensors
* Read input tensors that do not have a shape specified as 1D tensors (see the sketch below)
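
A minimal standalone sketch of the intended behaviour. This is not the delegate source: FakeTensor, FakeTensorInfo and GetInfo are illustrative stand-ins for TfLiteTensor, armnn::TensorInfo and GetTensorInfoForTfLiteTensor, showing that a shapeless tensor is read as a 1D tensor unless it is an operator output, that the constant flag is only set when the framework reports the tensor as constant, and how a constant input's shape can be propagated to an output with an unspecified shape (as in the new UpdateConstantTensorOutputs helper).

    #include <cstdio>
    #include <vector>

    struct FakeTensor                       // stand-in for TfLiteTensor
    {
        std::vector<int> dims;              // empty == shape not specified
        bool isConstant = false;
    };

    struct FakeTensorInfo                   // stand-in for armnn::TensorInfo
    {
        std::vector<unsigned int> shape;
        bool shapeSpecified = false;
        bool constant = false;
    };

    FakeTensorInfo GetInfo(const FakeTensor& tensor, bool isOutput = false)
    {
        FakeTensorInfo info;
        if (tensor.dims.empty())
        {
            // Inputs without a shape are assumed to be 1D with one element;
            // outputs keep an unspecified shape so it can be inferred later.
            if (!isOutput)
            {
                info.shape = { 1 };
                info.shapeSpecified = true;
                info.constant = tensor.isConstant;  // only real constants are flagged
            }
            return info;
        }
        for (int d : tensor.dims)
        {
            info.shape.push_back(static_cast<unsigned int>(d));
        }
        info.shapeSpecified = true;
        info.constant = tensor.isConstant;
        return info;
    }

    int main()
    {
        FakeTensor scalarInput;             // no dims -> treated as 1D {1}
        scalarInput.isConstant = true;
        FakeTensor output;                  // operator output, shape left unspecified

        FakeTensorInfo inputInfo  = GetInfo(scalarInput);
        FakeTensorInfo outputInfo = GetInfo(output, true);

        // Mirrors UpdateConstantTensorOutputs(): a constant input's shape is
        // propagated to an output whose shape is still unspecified.
        if (inputInfo.constant && !outputInfo.shapeSpecified)
        {
            outputInfo.shape = inputInfo.shape;
            outputInfo.shapeSpecified = true;
        }

        std::printf("input dims: %zu, output dims: %zu\n",
                    inputInfo.shape.size(), outputInfo.shape.size());
        return 0;
    }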

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I12f3d1c57a2afedac42c6e7b31e4b1fc689abeca
diff --git a/delegate/src/Activation.hpp b/delegate/src/Activation.hpp
index 5ac92f8..0071873 100644
--- a/delegate/src/Activation.hpp
+++ b/delegate/src/Activation.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -61,7 +61,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::ActivationDescriptor activationDesc;
     switch(operatorCode)
diff --git a/delegate/src/ArgMinMax.hpp b/delegate/src/ArgMinMax.hpp
index 0b0b13d..057dc8b 100644
--- a/delegate/src/ArgMinMax.hpp
+++ b/delegate/src/ArgMinMax.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -37,7 +37,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     // Get const axis value from model and set it to descriptor.
     const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
diff --git a/delegate/src/BatchSpace.hpp b/delegate/src/BatchSpace.hpp
index 5f8d5cc..847d6f1 100644
--- a/delegate/src/BatchSpace.hpp
+++ b/delegate/src/BatchSpace.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -50,7 +50,7 @@
     const armnn::TensorInfo& inputTensorInfo      = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBlockShapeTensor);
     const armnn::TensorInfo& cropsTensorInfo      = GetTensorInfoForTfLiteTensor(tfLiteCropsTensor);
-    const armnn::TensorInfo& outputTensorInfo     = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo     = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
     ::memcpy(blockShape.data(), tfLiteBlockShapeTensor.data.data, blockShapeTensorInfo.GetNumBytes());
@@ -141,7 +141,7 @@
     const armnn::TensorInfo& inputTensorInfo      = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBlockShapeTensor);
     const armnn::TensorInfo& padListTensorInfo    = GetTensorInfoForTfLiteTensor(tfLitePadListTensor);
-    const armnn::TensorInfo& outputTensorInfo     = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo     = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
     std::memcpy(blockShape.data(), tfLiteBlockShapeTensor.data.data, blockShapeTensorInfo.GetNumBytes());
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
index ce12e9f..8bf53c7 100644
--- a/delegate/src/Comparison.hpp
+++ b/delegate/src/Comparison.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -59,7 +59,7 @@
 
     const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
     const armnn::TensorInfo& inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::ComparisonOperation comparisonOperation = armnn::ComparisonOperation::Equal;
     switch(tfLiteComparisonOperatorCode)
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index 7e02de1..f04245b 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -115,7 +115,7 @@
         SetupConcatViewOrigin(inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
     }
 
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     // Check if supported
     bool isSupported = false;
@@ -232,7 +232,7 @@
 
     const armnn::TensorInfo& inputTensorInfo =  GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     const armnn::TensorInfo& axisTensorInfo =   GetTensorInfoForTfLiteTensor(tfLiteAxisTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
 
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index 679f4db..93da4c8 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -98,7 +98,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
 
@@ -163,14 +163,17 @@
     // Set up filter and biases
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
 
-    auto filter =
-        CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]],
-                          filterTensorInfo,
-                          armnn::Optional<armnn::PermutationVector&>());
+    if(tflite::IsConstantTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]]))
+    {
+        auto filter =
+                CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]],
+                                  filterTensorInfo,
+                                  armnn::Optional<armnn::PermutationVector &>());
 
-    armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
-    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
-    weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
+        armnn::IConnectableLayer *weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
+    }
 
     if (biasEnabled)
     {
@@ -255,7 +258,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
 
@@ -449,7 +452,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
 
@@ -494,9 +497,6 @@
         biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
     }
 
-    // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
-    auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
-
     if (!delegateData.m_Network)
     {
         bool isSupported = false;
@@ -508,16 +508,22 @@
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    descriptor,
-                                   filter.GetInfo(),
+                                   filterTensorInfo,
                                    armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
 
-    armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
-    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
-    weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
+    if(tflite::IsConstantTensor(&tfLiteFilterTensor))
+    {
+        // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
+        auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
+
+        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
+    }
 
     if (biasEnabled)
     {
@@ -663,7 +669,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
     armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
 
     // TfLite uses NHWC tensors
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 46b2db9..58d8048 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -404,14 +404,16 @@
     }
 }
 
-armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
+armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor, bool isOutput = false)
 {
     armnn::DataType type = GetDataType(tfLiteTensor);
     armnn::TensorInfo ret;
     auto tensorDimensionSize = tfLiteTensor.dims->size;
     if (tensorDimensionSize == 0)
     {
-        if(tflite::IsConstantTensor(&tfLiteTensor))
+        // If the input tensor does not have a shape specified,
+        // assume it is a 1D tensor
+        if (!isOutput)
         {
             std::vector<unsigned int> safeShape = { 1 };
             bool dimensionsSpecificity[1] = { true };
@@ -419,7 +421,10 @@
                                            safeShape.data(),
                                            dimensionsSpecificity);
             ret = armnn::TensorInfo(tensorShape, type);
-            ret.SetConstant(true);
+            if(tflite::IsConstantTensor(&tfLiteTensor))
+            {
+                ret.SetConstant(true);
+            }
         }
         else
         {
@@ -652,4 +657,15 @@
            (armnn::DataType::Signed32 == outputInfo.GetDataType());
 }
 
+void UpdateConstantTensorOutputs(const armnn::TensorInfo& inputInfo, armnn::TensorInfo& outputInfo)
+{
+    // If the input tensor info is constant and the output tensor shape is not specified,
+    // set the output shape from the input shape
+    if (inputInfo.IsConstant() && outputInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
+    {
+        outputInfo.SetShape(inputInfo.GetShape());
+    }
+    return;
+}
+
 } // namespace anonymous
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index 434694c..6e81db4 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -246,7 +246,7 @@
     armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
     armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
 
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     if (!delegateData.m_Network)
     {
diff --git a/delegate/src/ElementwiseUnary.hpp b/delegate/src/ElementwiseUnary.hpp
index c65ddef..79d7f82 100644
--- a/delegate/src/ElementwiseUnary.hpp
+++ b/delegate/src/ElementwiseUnary.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -47,7 +47,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::ElementwiseUnaryDescriptor descriptor(unaryOperation);
     bool isSupported = false;
diff --git a/delegate/src/Fill.hpp b/delegate/src/Fill.hpp
index 859f5ff..dc30e53 100644
--- a/delegate/src/Fill.hpp
+++ b/delegate/src/Fill.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -52,7 +52,7 @@
     }
 
     armnn::TensorInfo inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::FillDescriptor descriptor;
     switch (tfLiteFillTensor.type)
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index e8c13f2..6677ab9 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -54,7 +54,7 @@
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     armnn::TensorInfo weightsTensorInfo       = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     // Fully Connected Layer accepts two dimensional weights input
     int32_t weightsDimension = static_cast<int32_t>(weightsTensorInfo.GetNumDimensions());
diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp
index 11f3b61..616de7e 100644
--- a/delegate/src/Gather.hpp
+++ b/delegate/src/Gather.hpp
@@ -47,7 +47,7 @@
 
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
     armnn::GatherDescriptor gatherDescriptor;
     gatherDescriptor.m_Axis = axis;
 
diff --git a/delegate/src/GatherNd.hpp b/delegate/src/GatherNd.hpp
index b2d7a508..1e12c5c 100644
--- a/delegate/src/GatherNd.hpp
+++ b/delegate/src/GatherNd.hpp
@@ -44,7 +44,7 @@
 
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     if (!delegateData.m_Network)
     {
diff --git a/delegate/src/LogicalBinary.hpp b/delegate/src/LogicalBinary.hpp
index 2e9091b..562b5d3 100644
--- a/delegate/src/LogicalBinary.hpp
+++ b/delegate/src/LogicalBinary.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -44,7 +44,7 @@
 
     armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
     armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     // Setup descriptor and assign operation
     armnn::LogicalBinaryDescriptor desc;
diff --git a/delegate/src/Lstm.hpp b/delegate/src/Lstm.hpp
index b082db6..253cd21 100644
--- a/delegate/src/Lstm.hpp
+++ b/delegate/src/Lstm.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -149,7 +149,7 @@
                                 || params.m_OutputLayerNormWeights != nullptr);
 
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     unsigned int batchSize  = inputTensorInfo.GetShape()[0];
     unsigned int outputSize = outputTensorInfo.GetShape()[1];
diff --git a/delegate/src/Normalization.hpp b/delegate/src/Normalization.hpp
index fa1c547..0933552 100644
--- a/delegate/src/Normalization.hpp
+++ b/delegate/src/Normalization.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -36,7 +36,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::L2NormalizationDescriptor descriptor;
     descriptor.m_DataLayout = armnn::DataLayout::NHWC;
@@ -95,7 +95,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::NormalizationDescriptor descriptor;
     descriptor.m_DataLayout = armnn::DataLayout::NHWC;
diff --git a/delegate/src/Pack.hpp b/delegate/src/Pack.hpp
index 2dc266b..458c174 100644
--- a/delegate/src/Pack.hpp
+++ b/delegate/src/Pack.hpp
@@ -60,7 +60,7 @@
         return kTfLiteError;
     }
 
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::StackDescriptor desc;
     desc.m_NumInputs = static_cast<uint32_t>(numInputs);
diff --git a/delegate/src/Pad.hpp b/delegate/src/Pad.hpp
index 8428079..daedede 100644
--- a/delegate/src/Pad.hpp
+++ b/delegate/src/Pad.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -59,7 +59,7 @@
 
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     const armnn::TensorInfo& paddingTensorInfo = GetTensorInfoForTfLiteTensor(tfLitepaddingTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     // Get the padding data from the input tensor
     auto* paddingData = tflite::GetTensorData<int32_t>(&tfLitepaddingTensor);
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
index df8c3db..dfe90cb 100644
--- a/delegate/src/Pooling.hpp
+++ b/delegate/src/Pooling.hpp
@@ -47,7 +47,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::PoolingAlgorithm poolingAlgorithm;
     switch(tfLitePoolingOperatorCode)
@@ -145,7 +145,7 @@
     }
     // Set the input and output info
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     // Custom Operators are defined by the name string associated to the operator. Use this to determine
     // which pooling algorithm to create the armnn operator with. L2 Pooling3D is unsupported in TfLite.
diff --git a/delegate/src/Prelu.hpp b/delegate/src/Prelu.hpp
index aef70e8..398abaf 100644
--- a/delegate/src/Prelu.hpp
+++ b/delegate/src/Prelu.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -69,7 +69,7 @@
 
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
     const armnn::TensorInfo& alphaTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAlphaTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     if (!delegateData.m_Network)
     {
diff --git a/delegate/src/Quantization.hpp b/delegate/src/Quantization.hpp
index 26a9339..7871375 100644
--- a/delegate/src/Quantization.hpp
+++ b/delegate/src/Quantization.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -45,9 +45,10 @@
 
         return kTfLiteError;
     }
-
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+    UpdateConstantTensorOutputs(inputTensorInfo, outputTensorInfo);
 
     bool isSupported = false;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
@@ -126,7 +127,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     bool isSupported = false;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
index ce25f7f..cdae719 100644
--- a/delegate/src/Redefine.hpp
+++ b/delegate/src/Redefine.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -41,7 +41,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     bool isSupported = false;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
@@ -137,7 +137,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::ReshapeDescriptor reshapeDesc;
     std::vector<int32_t> targetShape;
diff --git a/delegate/src/Reduce.hpp b/delegate/src/Reduce.hpp
index b8db04c..79f2f52 100644
--- a/delegate/src/Reduce.hpp
+++ b/delegate/src/Reduce.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -37,7 +37,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     // Get const axis value from model and set it to descriptor.
     const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
diff --git a/delegate/src/Resize.hpp b/delegate/src/Resize.hpp
index 63f8e68..b59006c 100644
--- a/delegate/src/Resize.hpp
+++ b/delegate/src/Resize.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -86,7 +86,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     std::string layerName("Resize");
 
diff --git a/delegate/src/Round.hpp b/delegate/src/Round.hpp
index 016af11..b920bd5 100644
--- a/delegate/src/Round.hpp
+++ b/delegate/src/Round.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -41,7 +41,7 @@
     // NOTE: looks like the outputTensorInfo is the only thing that is required for the case
     //       where we are adding the floor layer so maybe move the other stuff inside the
     //       if !delegateData block for efficiency.
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
     // support for the operator
diff --git a/delegate/src/Shape.hpp b/delegate/src/Shape.hpp
index 51aa8f1..284dc9f 100644
--- a/delegate/src/Shape.hpp
+++ b/delegate/src/Shape.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -39,7 +39,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     auto* shapeParameters = reinterpret_cast<TfLiteShapeParams*>(tfLiteNode->builtin_data);
     if ( shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64 )
diff --git a/delegate/src/Slice.hpp b/delegate/src/Slice.hpp
index 04b19cb..6e355ae 100644
--- a/delegate/src/Slice.hpp
+++ b/delegate/src/Slice.hpp
@@ -111,7 +111,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(*tfLiteInputs[0]);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     bool isSupported = false;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp
index f257091..efc1cba 100644
--- a/delegate/src/Softmax.hpp
+++ b/delegate/src/Softmax.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -82,7 +82,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
 
     if (!delegateData.m_Network)
diff --git a/delegate/src/SpaceDepth.hpp b/delegate/src/SpaceDepth.hpp
index ac162fb..593d0e7 100644
--- a/delegate/src/SpaceDepth.hpp
+++ b/delegate/src/SpaceDepth.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -36,7 +36,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::SpaceToDepthDescriptor descriptor;
     auto* params = reinterpret_cast<TfLiteSpaceToDepthParams*>(tfLiteNode->builtin_data);
@@ -95,7 +95,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     armnn::DepthToSpaceDescriptor descriptor;
     auto* params = reinterpret_cast<TfLiteDepthToSpaceParams*>(tfLiteNode->builtin_data);
diff --git a/delegate/src/Split.hpp b/delegate/src/Split.hpp
index 66e2295..a535585 100644
--- a/delegate/src/Split.hpp
+++ b/delegate/src/Split.hpp
@@ -70,7 +70,7 @@
         {
             return kTfLiteError;
         }
-        outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor));
+        outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
     }
     const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
 
@@ -224,7 +224,7 @@
         {
             return kTfLiteError;
         }
-        outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor));
+        outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
     }
     const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
 
diff --git a/delegate/src/Transpose.hpp b/delegate/src/Transpose.hpp
index 0741ad2..80bb122 100644
--- a/delegate/src/Transpose.hpp
+++ b/delegate/src/Transpose.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -58,7 +58,7 @@
     }
 
     const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     auto* permTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteInputTensor1);
     unsigned int numEl = tfLiteInputTensor1.dims->data[0];
diff --git a/delegate/src/UnidirectionalSequenceLstm.hpp b/delegate/src/UnidirectionalSequenceLstm.hpp
index 1a02a0c..64ed778 100644
--- a/delegate/src/UnidirectionalSequenceLstm.hpp
+++ b/delegate/src/UnidirectionalSequenceLstm.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -182,7 +182,7 @@
         desc.m_HiddenStateZeroPoint = hiddentensorInfo.GetQuantizationOffset();
     }
     const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
     unsigned int batchSize  = inputTensorInfo.GetShape()[0];
     unsigned int outputSize = outputTensorInfo.GetShape()[2];
diff --git a/delegate/src/Unpack.hpp b/delegate/src/Unpack.hpp
index 447a4da..aaea005 100644
--- a/delegate/src/Unpack.hpp
+++ b/delegate/src/Unpack.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -114,7 +114,7 @@
         {
             return kTfLiteError;
         }
-        outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor));
+        outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true));
     }
     const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());