IVGCVSW-6420: Constant flag in tensor info is not set correctly

!android-nn-driver:6532
!armnn-internal-tests:372451

  * Made fix to 2 out of 3 ConstTensor() constructors in Tensor.hpp to
    throw InvalidArgumentException when TensorInfo isConstant parameter
    is false.
  * Added new ConstTensor() constructor in Tensor.cpp to accept vector<>.data()
    using template<typename MemoryType>.
  * Fixed runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods and
    called submethods to return TensorInfo& rather than TensorInfo.
  * Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any
    ConstTensor created has its TensorInfo isConstant set to true.
  * Added unit tests in TensorTest.cpp to ensure ConstTensor constructors
    throw InvalidArgumentException when TensorInfo isConstant parameter is
    false.
  * Added unit test to ensure an empty ConstTensor constructor will set
    TensorInfo isConstant to true.
  * Indentation fixes.
  * Fix to arm_tensor.i to add isConstant parameter to TensorInfo
    constructor. Added methods IsConstant() and SetConstant().
  * Fix to const_tensor.py to throw ValueError when TensorInfo
    isConstant is set to false when constructing a ConstTensor.
  * Fixed PyArmnn unit tests to set TensorInfo isConstant to
    True when ConstTensor is used.
  * Added unit tests in test_const_tensor.py to ensure ConstTensor
    constructors throw ValueError when TensorInfo isConstant parameter
    is false.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index fe68193..fcdad3e 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -33,7 +33,7 @@
     {
         ::memcpy(permuteBuffer, tensor->GetConstTensor<void>(), tensorInfo.GetNumBytes());
     }
-
+    tensorInfo.SetConstant(true);
     return ConstTensor(tensorInfo, permuteBuffer);
 }
 
diff --git a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
index 0b1bf77..f7d4596 100644
--- a/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ActivationEndToEndTestImpl.hpp
@@ -127,7 +127,7 @@
 
     float qScale = 1.0f;
     int32_t qOffset = 0;
-    armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+    armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true);
     armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
 
     armnn::ActivationDescriptor descriptor(ActivationFunction::Elu, 1.0);
@@ -156,7 +156,7 @@
 
     float qScale = 1.0f;
     int32_t qOffset = 0;
-    armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
+    armnn::TensorInfo inputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset, true);
     armnn::TensorInfo outputInfo({ 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
 
     armnn::ActivationDescriptor descriptor(ActivationFunction::HardSwish, 1.0);
diff --git a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
index 2ffe06f..041f9f8 100644
--- a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
@@ -47,7 +47,7 @@
     const float qScale  = armnn::IsQuantizedType<T>() ? 2.0f : 1.0f;
     const int32_t qOffset = armnn::IsQuantizedType<T>() ? 2 : 0;
 
-    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset, true);
     armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
 
     // quantize data
diff --git a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
index 254b3c2..859694c 100644
--- a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
@@ -30,7 +30,7 @@
     // Builds up the structure of the network.
     INetworkPtr net(INetwork::Create());
 
-    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
     TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
 
     BatchToSpaceNdDescriptor batchToSpaceNdDesc(blockShape, crops);
diff --git a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
index 9ec7644..7d46be7 100644
--- a/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ChannelShuffleEndToEndTestImpl.hpp
@@ -37,6 +37,7 @@
 
     inputInfo.SetQuantizationScale(1.0f);
     inputInfo.SetQuantizationOffset(0);
+    inputInfo.SetConstant(true);
     outputInfo.SetQuantizationScale(1.0f);
     outputInfo.SetQuantizationOffset(0);
 
diff --git a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
index 40e3fd6..e274163 100644
--- a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
@@ -35,7 +35,7 @@
 
     for (unsigned int i = 0; i < inputShapes.size(); ++i)
     {
-        TensorInfo inputTensorInfo(inputShapes[i], ArmnnTypeInput, qScale, qOffset);
+        TensorInfo inputTensorInfo(inputShapes[i], ArmnnTypeInput, qScale, qOffset, true);
         IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(i));
         Connect(input, comparisonLayer, inputTensorInfo, 0, i);
     }
diff --git a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
index 5b2f33f..62f0e4c 100644
--- a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
@@ -39,7 +39,7 @@
 
     for (unsigned int i = 0; i < inputShapes.size(); ++i)
     {
-        TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset);
+        TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset, true);
         IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(i));
         Connect(input, concat, inputTensorInfo, 0, i);
     }
diff --git a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
index 33bf9a1..b1f685b 100644
--- a/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Convolution3dEndToEndTestImpl.hpp
@@ -56,7 +56,7 @@
     const float   qScale  = IsQuantizedType<T>() ? 0.25f : 1.0f;
     const int32_t qOffset = IsQuantizedType<T>() ? 50    : 0;
 
-    TensorInfo inputInfo({ 1, 5, 5, 5, 1 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputInfo({ 1, 5, 5, 5, 1 }, ArmnnType, qScale, qOffset, true);
     TensorInfo outputInfo({ 1, 2, 2, 2, 1 }, ArmnnType, qScale, qOffset);
     TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, ArmnnType, qScale, qOffset, true);
     TensorInfo biasesInfo({ 1 }, ArmnnBType, qScale * qScale, 0, true);
diff --git a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
index ea99729..0a4c29b 100644
--- a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
+++ b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
@@ -120,7 +120,7 @@
 
 TEST_CASE("TestAsyncExecute")
 {
-    TensorInfo info({5}, DataType::Signed32);
+    TensorInfo info({5}, DataType::Signed32, 0.0, 0, true);
 
     int inVals[5]{2, 2, 2, 2, 2};
     int outVals[5]{1, 1, 1, 1, 1};
@@ -157,7 +157,7 @@
 
 TEST_CASE("TestDefaultAsyncExecute")
 {
-    TensorInfo info({5}, DataType::Signed32);
+    TensorInfo info({5}, DataType::Signed32, 0.0f, 0, true);
 
     std::vector<int> inVals{2, 2, 2, 2, 2};
     std::vector<int> outVals{1, 1, 1, 1, 1};
@@ -193,7 +193,7 @@
 {
     // Use a large vector so the threads have a chance to interact
     unsigned int vecSize = 1000;
-    TensorInfo info({vecSize}, DataType::Signed32);
+    TensorInfo info({vecSize}, DataType::Signed32, 0.0f, 0, true);
 
     std::vector<int> inVals1(vecSize, 2);
     std::vector<int> outVals1(vecSize, 1);
diff --git a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
index c6176ae..b64e618 100644
--- a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
@@ -44,6 +44,7 @@
     using namespace armnn;
 
     TensorInfo inputInfo(nhwcInputShape, ArmnnType);
+    inputInfo.SetConstant(true);
     TensorInfo outputInfo(nhwcOutputShape, ArmnnType);
 
     constexpr float   qScale  = 0.25f;
diff --git a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
index a5e2fac..fff4c4f 100644
--- a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
@@ -43,6 +43,7 @@
 
     inputInfo.SetQuantizationScale(scale);
     inputInfo.SetQuantizationOffset(offset);
+    inputInfo.SetConstant(true);
 
     // Builds up the structure of the network
     armnn::INetworkPtr net = CreateDequantizeNetwork<T>(inputInfo, outputInfo);
diff --git a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
index a566964..c448886 100644
--- a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
@@ -82,10 +82,13 @@
 
     boxEncodingsInfo.SetQuantizationScale(boxScale);
     boxEncodingsInfo.SetQuantizationOffset(boxOffset);
+    boxEncodingsInfo.SetConstant(true);
     scoresInfo.SetQuantizationScale(scoreScale);
     scoresInfo.SetQuantizationOffset(scoreOffset);
+    scoresInfo.SetConstant(true);
     anchorsInfo.SetQuantizationScale(anchorScale);
     anchorsInfo.SetQuantizationOffset(anchorOffset);
+    anchorsInfo.SetConstant(true);
 
     // Builds up the structure of the network
     armnn::INetworkPtr net = CreateDetectionPostProcessNetwork<T>(boxEncodingsInfo, scoresInfo,
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 046ee3a..0d9d3dd 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -1594,10 +1594,12 @@
     std::vector<float> expectedOutputData{ 15.0f, 11.0f };
     std::vector<float> outputData(2);
 
+    TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
+    inputTensorInfo.SetConstant(true);
     InputTensors inputTensors
         {
-            {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input0Data.data())},
-            {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())}
+            {0,armnn::ConstTensor(inputTensorInfo, input0Data.data())},
+            {1,armnn::ConstTensor(inputTensorInfo, input1Data.data())}
         };
     OutputTensors outputTensors
         {
diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
index f958613..635dc96 100644
--- a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
@@ -33,7 +33,7 @@
     ElementwiseUnaryDescriptor descriptor(operation);
     IConnectableLayer* elementwiseUnaryLayer = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary");
 
-    TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset);
+    TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset, true);
     IConnectableLayer* input = net->AddInputLayer(armnn::numeric_cast<LayerBindingId>(0));
     Connect(input, elementwiseUnaryLayer, inputTensorInfo, 0, 0);
 
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 2d268f8..269a460 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -79,7 +79,8 @@
 
 inline bool ConstantUsageFloat32Test(const std::vector<BackendId>& backends)
 {
-    const TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32);
+    TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32);
+    commonTensorInfo.SetConstant(true);
 
     return ConstantUsageTest(backends,
         commonTensorInfo,
@@ -98,6 +99,7 @@
 
     commonTensorInfo.SetQuantizationScale(scale);
     commonTensorInfo.SetQuantizationOffset(offset);
+    commonTensorInfo.SetConstant(true);
 
     return ConstantUsageTest(backends,
         commonTensorInfo,
@@ -198,7 +200,7 @@
     input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
     pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
@@ -263,7 +265,7 @@
     input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
     pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
@@ -334,7 +336,7 @@
     input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
     pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
@@ -418,7 +420,7 @@
     input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
     pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
@@ -449,6 +451,7 @@
     };
 
     INFO("Create Network");
+
     InputTensors inputTensors
     {
         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -507,7 +510,7 @@
     input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
     pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
@@ -536,6 +539,7 @@
     };
 
     INFO("Create Network");
+
     InputTensors inputTensors
     {
         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -594,7 +598,7 @@
     input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
     pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
@@ -624,6 +628,7 @@
     };
 
     INFO("Create Network");
+
     InputTensors inputTensors
     {
         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -685,7 +690,7 @@
     activation->GetOutputSlot(0).Connect(output0->GetInputSlot(0));
     activation->GetOutputSlot(0).Connect(output1->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32, 0.0f, 0, true));
     activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
 
     // Optimize the network
@@ -794,7 +799,7 @@
     input->GetOutputSlot(0).Connect(stridedSlice->GetInputSlot(0));
     stridedSlice->GetOutputSlot(0).Connect(output0->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32, 0.0f, 0, true));
     stridedSlice->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3 }, DataType::Float32));
 
     // Attempt to optimize the network and check that the correct exception is thrown
diff --git a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
index 2a4ccb6..27e5aa0 100644
--- a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
@@ -52,7 +52,7 @@
     };
     std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData);
 
-    TensorInfo inputInfo ({ 4 }, DataType::Signed32);
+    TensorInfo inputInfo ({ 4 }, DataType::Signed32, 0.0f, 0, true);
     TensorInfo outputInfo({ 1, 1, 5, 3 }, ArmnnType);
 
     armnn::INetworkPtr network = CreateFillNetwork(inputInfo, outputInfo, descriptor);
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index f9bdfde..878b6af 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -166,6 +166,7 @@
     armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 3 }, ArmnnType);
     inputTensorInfo.SetQuantizationScale(0.1f);
     inputTensorInfo.SetQuantizationOffset(63);
+    inputTensorInfo.SetConstant(true);
 
     armnn::TensorInfo outputTensorInfo({ 1, 2 }, ArmnnType);
     outputTensorInfo.SetQuantizationScale(5.f);
@@ -174,6 +175,7 @@
     armnn::TensorInfo weightsTensorInfo({ 2, 6 }, ArmnnType);
     weightsTensorInfo.SetQuantizationScale(0.2f);
     weightsTensorInfo.SetQuantizationOffset(93);
+    weightsTensorInfo.SetConstant(true);
 
     FullyConnectedDescriptor descriptor;
     descriptor.m_ConstantWeights = false;
@@ -236,10 +238,10 @@
 
     unsigned int biasShape[] = { outputChannels };
 
-    armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32, 0.0f, 0, true);
     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
-    armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
-    armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
+    armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32, 0.0f, 0, true);
+    armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32, 0.0f, 0, true);
 
     std::vector<float> input =
     {
@@ -352,10 +354,10 @@
 
     unsigned int biasShape[] = { outputChannels };
 
-    armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32, 0.0f, 0, true);
     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
-    armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
-    armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
+    armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32, 0.0f, 0, true);
+    armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32, 0.0f, 0, true);
 
     std::vector<float> weights =
     {
diff --git a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
index 431ef31..4c67ec2 100644
--- a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
@@ -42,6 +42,8 @@
 
     paramsInfo.SetQuantizationScale(1.0f);
     paramsInfo.SetQuantizationOffset(0);
+    paramsInfo.SetConstant(true);
+    indicesInfo.SetConstant(true);
     outputInfo.SetQuantizationScale(1.0f);
     outputInfo.SetQuantizationOffset(0);
 
@@ -78,6 +80,8 @@
 
     paramsInfo.SetQuantizationScale(1.0f);
     paramsInfo.SetQuantizationOffset(0);
+    paramsInfo.SetConstant(true);
+    indicesInfo.SetConstant(true);
     outputInfo.SetQuantizationScale(1.0f);
     outputInfo.SetQuantizationOffset(0);
 
diff --git a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
index d758137..e715e6b 100644
--- a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
@@ -36,7 +36,7 @@
     // Builds up the structure of the network.
     INetworkPtr net(INetwork::Create());
 
-    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
 
     InstanceNormalizationDescriptor instanceNormalizationDesc;
     instanceNormalizationDesc.m_Gamma = gamma;
@@ -104,7 +104,7 @@
     const float gamma     = 1.0f;
 
     TensorShape inputShape{2, 2, 2, 2};
-    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
 
     TensorShape outputShape{2, 2, 2, 2};
     TensorInfo outputTensorInfo(outputShape, DataType::Float32);
@@ -174,7 +174,7 @@
     const float gamma     = 1.0f;
 
     TensorShape inputShape{2, 2, 2, 2};
-    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
 
     TensorShape outputShape{2, 2, 2, 2};
     TensorInfo outputTensorInfo(outputShape, DataType::Float32);
@@ -248,7 +248,7 @@
     TensorShape outputShape{2, 2, 2, 2};
 
     TensorInfo outputTensorInfo(outputShape, DataType::Float32);
-    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
 
     std::vector<float> inputData = std::vector<float>(
     {
@@ -319,7 +319,7 @@
     TensorShape outputShape{2, 2, 2, 2};
 
     TensorInfo outputTensorInfo(outputShape, DataType::Float32);
-    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
 
     std::vector<float> inputData = std::vector<float>(
         {
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 94855aa..226e2b3 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -177,9 +177,11 @@
         };
     std::vector<uint8_t> outputData(5);
 
+    TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 0);
+    inputTensorInfo2.SetConstant(true);
     armnn::InputTensors inputTensors
         {
-            {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
+            {0, armnn::ConstTensor(inputTensorInfo2, inputData.data())}
         };
     armnn::OutputTensors outputTensors
         {
diff --git a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
index 1f7f578..181ecd9 100644
--- a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
@@ -27,7 +27,7 @@
     // Builds up the structure of the network.
     INetworkPtr net(INetwork::Create());
 
-    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
 
     LogSoftmaxDescriptor logSoftmaxDesc;
     logSoftmaxDesc.m_Beta = beta;
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index b0ee9be..6eecaab 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -370,8 +370,8 @@
     const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 1, 2, 2, 1 }, armnn::DataType::Float32);
 
-    const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
-    const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+    const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+    const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
 
     std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
     armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -443,10 +443,12 @@
     std::vector<float> inputData = GenerateRandomData<float>(runtime->GetInputTensorInfo(optNetId, 0).GetNumElements());
     std::vector<float> outputData(runtime->GetOutputTensorInfo(optNetId, 0).GetNumElements());
 
+    armnn::TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(optNetId, 0);
+    inputTensorInfo.SetConstant(true);
     armnn::InputTensors inputTensors
     {
         {
-            0 ,armnn::ConstTensor(runtime->GetInputTensorInfo(optNetId, 0), inputData.data())
+            0, armnn::ConstTensor(inputTensorInfo, inputData.data())
         }
     };
     armnn::OutputTensors outputTensors
@@ -464,10 +466,12 @@
         armnn::NetworkId netId = networkIds[i];
         std::vector<float> copyOutputData(runtime->GetOutputTensorInfo(netId, 0).GetNumElements());
 
+        armnn::TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 0);
+        inputTensorInfo2.SetConstant(true);
         armnn::InputTensors copyInputTensors
         {
             {
-                0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())
+                0, armnn::ConstTensor(inputTensorInfo2, inputData.data())
             }
         };
         armnn::OutputTensors copyOutputTensors
diff --git a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
index e11553d..c31d084 100644
--- a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
@@ -51,8 +51,10 @@
 
     inputInfo.SetQuantizationOffset(qOffset);
     inputInfo.SetQuantizationScale(qScale);
+    inputInfo.SetConstant(true);
     alphaInfo.SetQuantizationOffset(qOffset);
     alphaInfo.SetQuantizationScale(qScale);
+    alphaInfo.SetConstant(true);
     outputInfo.SetQuantizationOffset(qOffset);
     outputInfo.SetQuantizationScale(qScale);
 
diff --git a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
index 281bed1..e2147fc 100644
--- a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
@@ -80,22 +80,22 @@
     const armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                              armnn::DataType::QSymmS8,
                                              weightsScale,
-                                             weightsOffset);
+                                             weightsOffset, true);
 
     const armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                                  armnn::DataType::QSymmS8,
                                                  weightsScale,
-                                                 weightsOffset);
+                                                 weightsOffset, true);
 
     const armnn::TensorInfo biasInfo({outputSize},
                                      armnn::DataType::Signed32,
                                      biasScale,
-                                     biasOffset);
+                                     biasOffset, true);
 
     const armnn::TensorInfo layerNormWeightsInfo({numUnits},
                                                  armnn::DataType::QSymmS16,
                                                  layerNormScale,
-                                                 layerNormOffset);
+                                                 layerNormOffset, true);
 
     // Mandatory params
     const std::vector<int8_t> inputToForgetWeightsVector =
@@ -179,17 +179,17 @@
     const armnn::TensorInfo inputInfo({numBatches , inputSize},
                                       armnn::DataType::QAsymmS8,
                                       inputScale,
-                                      inputOffset);
+                                      inputOffset, true);
 
     const armnn::TensorInfo cellStateInfo({numBatches , numUnits},
                                           armnn::DataType::QSymmS16,
                                           cellStateScale,
-                                          cellStateOffset);
+                                          cellStateOffset, true);
 
     const armnn::TensorInfo outputStateInfo({numBatches , outputSize},
                                             armnn::DataType::QAsymmS8,
                                             outputScale,
-                                            outputOffset);
+                                            outputOffset, true);
 
     // Input tensor data
     const std::vector<int8_t> inputVector         = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index a2fadc7..f178951 100644
--- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
@@ -46,14 +46,14 @@
     armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                        armnn::DataType::QAsymmU8,
                                        weightsScale,
-                                       weightsOffset);
+                                       weightsOffset, true);
 
     armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                            armnn::DataType::QAsymmU8,
                                            weightsScale,
-                                           weightsOffset);
+                                           weightsOffset, true);
 
-    armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);
+    armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset, true);
 
     armnn::QuantizedLstmInputParams data;
 
@@ -210,9 +210,16 @@
     inputTensors.reserve(3);
 
     // input
-    inputTensors.push_back({0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputVector.data())});
-    inputTensors.push_back({1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), cellStateInVector.data())});
-    inputTensors.push_back({2, ConstTensor(runtime->GetInputTensorInfo(netId, 2), outputStateInVector.data())});
+    TensorInfo inputTensorInfo0 = runtime->GetInputTensorInfo(netId, 0);
+    TensorInfo inputTensorInfo1 = runtime->GetInputTensorInfo(netId, 1);
+    TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 2);
+    inputTensorInfo0.SetConstant(true);
+    inputTensorInfo1.SetConstant(true);
+    inputTensorInfo2.SetConstant(true);
+
+    inputTensors.push_back({0, ConstTensor(inputTensorInfo0, inputVector.data())});
+    inputTensors.push_back({1, ConstTensor(inputTensorInfo1, cellStateInVector.data())});
+    inputTensors.push_back({2, ConstTensor(inputTensorInfo2, outputStateInVector.data())});
 
     OutputTensors outputTensors;
     outputTensors.reserve(2);
diff --git a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
index 461b3b9..5229c47 100644
--- a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
@@ -46,7 +46,7 @@
 
     std::vector<int32_t> expectedOutputData{ 4 };
 
-    TensorInfo inputInfo ({ 1, 1, 5, 3 }, ArmnnType);
+    TensorInfo inputInfo ({ 1, 1, 5, 3 }, ArmnnType, 0.0f, 0, true);
     TensorShape outputShape (Dimensionality::Scalar);
     TensorInfo outputInfo(outputShape, DataType::Signed32);
 
diff --git a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
index aa7af11..a56db44 100644
--- a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
@@ -57,7 +57,7 @@
     const float   qScale  = IsQuantizedType<T>() ? 0.25f : 1.0f;
     const int32_t qOffset = IsQuantizedType<T>() ? 50    : 0;
 
-    TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset);
+    TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset, true);
     TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset);
 
     std::vector<float> inputData =
diff --git a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
index 4e5baad..e3b016e 100644
--- a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
@@ -34,7 +34,7 @@
     // Builds up the structure of the network.
     INetworkPtr net(INetwork::Create());
 
-    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
 
     armnnUtils::DataLayoutIndexed dimensionIndices(dataLayout);
     if (inputShape[dimensionIndices.GetHeightIndex()] % blockSize!=0
@@ -102,7 +102,7 @@
     const unsigned int blockSize = 2;
 
     TensorShape inputShape{1, 2, 2, 1};
-    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
 
     TensorShape outputShape{1, 1, 1, 4};
     TensorInfo outputTensorInfo(outputShape, DataType::Float32);
@@ -133,7 +133,7 @@
     const unsigned int blockSize = 2;
 
     TensorShape inputShape{1, 2, 2, 1};
-    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
 
     TensorShape outputShape{1, 1, 1, 4};
     TensorInfo outputTensorInfo(outputShape, DataType::Float32);
@@ -167,7 +167,7 @@
     TensorShape outputShape{1, 1, 1, 8};
 
     TensorInfo outputTensorInfo(outputShape, DataType::Float32);
-    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
 
     std::vector<float> inputData = std::vector<float>(
     {
@@ -197,7 +197,7 @@
     TensorShape inputShape{1, 2, 2, 2};
     TensorShape outputShape{1, 1, 1, 8};
 
-    TensorInfo inputTensorInfo(inputShape, DataType::Float32);
+    TensorInfo inputTensorInfo(inputShape, DataType::Float32, 0.0f, 0, true);
     TensorInfo outputTensorInfo(outputShape, DataType::Float32);
 
 
diff --git a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
index 64e24e5..3a2af68 100644
--- a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
@@ -31,7 +31,7 @@
     // Builds up the structure of the network.
     INetworkPtr net(INetwork::Create());
 
-    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
+    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
 
     std::vector<unsigned int> splitterDimSizes(inputShape.GetNumDimensions());
 
diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
index 764983f..8ef5ecc 100644
--- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
@@ -62,8 +62,10 @@
         inputTensors.reserve(inputTensorData.size());
         for (auto&& it : inputTensorData[i])
         {
+            TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkId, it.first);
+            inputTensorInfo.SetConstant(true);
             inputTensors.push_back({it.first,
-                                    ConstTensor(runtime->GetInputTensorInfo(networkId, it.first), it.second.data())});
+                                    ConstTensor(inputTensorInfo, it.second.data())});
         }
 
         outputTensors.reserve(expectedOutputData.size());
@@ -146,8 +148,10 @@
     inputTensors.reserve(inputTensorData.size());
     for (auto&& it : inputTensorData)
     {
+        TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkId, it.first);
+        inputTensorInfo.SetConstant(true);
         inputTensors.push_back({it.first,
-                                ConstTensor(runtime->GetInputTensorInfo(networkId, it.first), it.second.data())});
+                                ConstTensor(inputTensorInfo, it.second.data())});
     }
 
     OutputTensors outputTensors;
diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
index 133829c..8f10869 100644
--- a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
@@ -68,10 +68,10 @@
     const float   qScale  = IsQuantizedType<T>() ? 0.25f : 1.0f;
     const int32_t qOffset = IsQuantizedType<T>() ? 50    : 0;
 
-    TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset);
+    TensorInfo inputInfo(inputShape, ArmnnType, qScale, qOffset, true);
     TensorInfo outputInfo(outputShape, ArmnnType, qScale, qOffset);
-    TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset);
-    TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0);
+    TensorInfo weightsInfo(weightsShape, ArmnnType, qScale, qOffset, true);
+    TensorInfo biasesInfo({ channels }, ArmnnBType, qScale * qScale, 0, true);
 
     std::vector<float> inputData =
     {