IVGCVSW-6420: Constant flag in tensor info is not set correctly

!android-nn-driver:6532
!armnn-internal-tests:372451

  * Made fix to 2 out of 3 ConstTensor() constructors in Tensor.hpp to
    throw InvalidArgumentException when TensorInfo isConstant parameter
    is false.
  * Added new ConstTensor() constructor in Tensor.cpp to accept vector<>.data()
    using template<typename MemoryType>.
  * Fixed runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods and
    called submethods to return TensorInfo& rather than TensorInfo.
  * Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any
    ConstTensor created has its TensorInfo isConstant set to true.
  * Added unit tests in TensorTest.cpp to ensure ConstTensor constructors
    throw InvalidArgumentException when the TensorInfo isConstant parameter
    is false.
  * Added unit test to ensure an empty ConstTensor constructor will set
    TensorInfo isConstant to true.
  * Indentation fixes.
  * Fix to arm_tensor.i to add isConstant parameter to TensorInfo
    constructor. Added methods IsConstant() and SetConstant().
  * Fix to const_tensor.py to throw ValueError when TensorInfo
    isConstant is set to false when constructing a ConstTensor.
  * Fixed PyArmnn unit tests to set TensorInfo isConstant to
    True when ConstTensor is used.
  * Added unit tests in test_const_tensor.py to ensure ConstTensor
    constructors throw ValueError when the TensorInfo isConstant parameter
    is false.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 2d268f8..269a460 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -79,7 +79,8 @@
 
 inline bool ConstantUsageFloat32Test(const std::vector<BackendId>& backends)
 {
-    const TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32);
+    TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32);
+    commonTensorInfo.SetConstant(true);
 
     return ConstantUsageTest(backends,
         commonTensorInfo,
@@ -98,6 +99,7 @@
 
     commonTensorInfo.SetQuantizationScale(scale);
     commonTensorInfo.SetQuantizationOffset(offset);
+    commonTensorInfo.SetConstant(true);
 
     return ConstantUsageTest(backends,
         commonTensorInfo,
@@ -198,7 +200,7 @@
     input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
     pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
@@ -263,7 +265,7 @@
     input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
     pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
@@ -334,7 +336,7 @@
     input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
     pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
@@ -418,7 +420,7 @@
     input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
     pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
@@ -449,6 +451,7 @@
     };
 
     INFO("Create Network");
+
     InputTensors inputTensors
     {
         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -507,7 +510,7 @@
     input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
     pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
@@ -536,6 +539,7 @@
     };
 
     INFO("Create Network");
+
     InputTensors inputTensors
     {
         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -594,7 +598,7 @@
     input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
     pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
@@ -624,6 +628,7 @@
     };
 
     INFO("Create Network");
+
     InputTensors inputTensors
     {
         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -685,7 +690,7 @@
     activation->GetOutputSlot(0).Connect(output0->GetInputSlot(0));
     activation->GetOutputSlot(0).Connect(output1->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32, 0.0f, 0, true));
     activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
 
     // Optimize the network
@@ -794,7 +799,7 @@
     input->GetOutputSlot(0).Connect(stridedSlice->GetInputSlot(0));
     stridedSlice->GetOutputSlot(0).Connect(output0->GetInputSlot(0));
 
-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32));
+    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32, 0.0f, 0, true));
     stridedSlice->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3 }, DataType::Float32));
 
     // Attempt to optimize the network and check that the correct exception is thrown