IVGCVSW-7454 Enable NonConstWeights in GpuAcc

* Set the constant flag on weights and bias TensorInfos in the ACL workloads (see the sketch below)
* Set the constant flag on weights and bias TensorInfos in the unit tests
* Add the constantWeights flag to the dot file output for the FullyConnected layer
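
The two idioms used throughout this change are sketched below. This is an
illustrative sketch only: the shapes, data types and quantization values are
placeholders, not values taken from the patch, and it assumes the TensorInfo
overloads already exercised in the diff (SetConstant() and the trailing
isConstant constructor argument).

    #include <armnn/Tensor.hpp>

    void MarkWeightsAndBiasConstant()
    {
        // Idiom 1: construct first, then set the constant flag.
        armnn::TensorInfo kernelDesc({1, 3, 3, 16}, armnn::DataType::Float32);
        kernelDesc.SetConstant(true);

        // Idiom 2: pass the flag as the trailing constructor argument
        // (the overload that also takes quantization scale/offset).
        armnn::TensorInfo biasDesc({16}, armnn::DataType::Signed32,
                                   /*quantizationScale=*/0.1f,
                                   /*quantizationOffset=*/0,
                                   /*isConstant=*/true);

        // Backends and workloads can then query the flag via IsConstant().
    }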

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I87e1fef516ce4a8a59245dfdf7d92c153418e1d6
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index 8b0644b..a0e5a1e 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -213,7 +213,7 @@
     auto clCapabilities = clBackend->GetCapabilities();
 
     CapabilityTestHelper(clCapabilities,
-                         {{"NonConstWeights", false},
+                         {{"NonConstWeights", true},
                           {"AsyncExecution", false},
                           {"ProtectedContentAllocation", true},
                           {"ConstantTensorsAsInputs", true},
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 8a2d8c8..69a04df 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -246,7 +246,9 @@
             armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
     armnn::TensorInfo kernelDesc =
             armnnUtils::GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout, ArmnnType);
+    kernelDesc.SetConstant(true);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
+    biasDesc.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -412,10 +414,11 @@
 
     // Creates the tensors.
     armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
-    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
-                                       OutType);
+    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels}, OutType);
     armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
+    kernelDesc.SetConstant(true);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
+    biasDesc.SetConstant(true);
 
     // Construct the input data.
     std::vector<T> inputData;
@@ -432,11 +435,6 @@
     std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
     std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
 
-//    armnn::ScopedTensorHandle weightsTensor(kernelDesc);
-//    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
-
-//    armnn::ScopedTensorHandle biasTensor(biasDesc);
-
     armnn::Convolution2dQueueDescriptor data;
 
     data.m_Parameters.m_StrideX = strideX;
@@ -512,7 +510,9 @@
     armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, ArmnnType);
     armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, ArmnnType);
     armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, ArmnnType);
+    kernelInfo.SetConstant(true);
     armnn::TensorInfo biasInfo({outputChannels}, ArmnnBType);
+    biasInfo.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -578,11 +578,6 @@
 
     armnn::Convolution2dQueueDescriptor data;
     armnn::WorkloadInfo info;
-//    armnn::ScopedTensorHandle weightsTensor(kernelInfo);
-//    armnn::ScopedTensorHandle biasTensor(biasInfo);
-//
-//    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
-//    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
 
     AddInputToWorkload(data, info, inputInfo, inputHandle.get());
     AddInputToWorkload(data, info, kernelInfo, weightsHandle.get());
@@ -1390,7 +1385,9 @@
     inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
     outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
     kernelDesc = armnn::TensorInfo(4, kernelShape, ArmnnType);
+    kernelDesc.SetConstant(true);
     biasDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
+    biasDesc.SetConstant(true);
 
     auto input  = MakeRandomTensor<T>(inputTensorInfo, 124908);
     auto kernel = MakeRandomTensor<T>(kernelDesc, 891234);
@@ -1730,7 +1727,9 @@
     armnn::TensorInfo outputTensorInfo =
             armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
     armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
+    kernelDesc.SetConstant(true);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
+    biasDesc.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if (armnn::IsQuantizedType<T>())
@@ -1874,9 +1873,10 @@
             armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
     armnn::TensorInfo outputTensorInfo =
             armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
-    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels},
-                                 ArmnnType);
+    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels}, ArmnnType);
+    kernelDesc.SetConstant(true);
     armnn::TensorInfo biasDesc({ outputChannels }, ArmnnBType);
+    biasDesc.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -2047,9 +2047,10 @@
             inputBatchSize, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
     armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
             outputBatchSize, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
-    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels},
-                                 ArmnnType);
+    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels}, ArmnnType);
+    kernelDesc.SetConstant(true);
     armnn::TensorInfo biasDesc({outputChannels}, ArmnnBType);
+    biasDesc.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -2291,8 +2292,9 @@
 
     // Kernel must be NCHW layout always, independently of the layout of the input and output for depthwise convolution.
     armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
-
+    kernelDesc.SetConstant(true);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
+    biasDesc.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -3084,8 +3086,8 @@
 
     inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset);
     outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset);
-    kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset);
-    biasDesc = armnn::TensorInfo(1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset);
+    kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset, true);
+    biasDesc = armnn::TensorInfo(1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset, true);
 
     auto input  = MakeRandomTensor<T>(inputTensorInfo, 124908, 0.0f, 255.0f);
     auto kernel = MakeRandomTensor<T>(kernelDesc, 891234, 0.0f, 255.0f);
@@ -3575,10 +3577,10 @@
     const std::vector<float> quantScales{ 0.5f, 0.75f, 1.0f };
     constexpr unsigned int quantDimension = 0;
 
-    TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension);
+    TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension, true);
 
     const std::vector<float> biasQuantScales{ 0.25f, 0.375f, 0.5f };
-    TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension);
+    TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension, true);
 
     std::vector<uint8_t> inputData =
     {
@@ -3625,11 +3627,6 @@
     std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
 
     WorkloadInfo workloadInfo;
-//    ScopedTensorHandle weightTensor(kernelInfo);
-//    ScopedTensorHandle biasTensor(biasInfo);
-//
-//    AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
-//    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
 
     Convolution2dQueueDescriptor queueDescriptor;
     queueDescriptor.m_Parameters = descriptor;
@@ -3843,11 +3840,11 @@
 
     const std::vector<float> quantScales{ 1.0f, 0.5f, 1.0f, 0.5f };
     const unsigned int quantDimension = 3;
-    TensorInfo kernelInfo({ 1, 2, 2, 4 }, kernelType, quantScales, quantDimension); // [1, H, W, I*M]
+    TensorInfo kernelInfo({ 1, 2, 2, 4 }, kernelType, quantScales, quantDimension, true); // [1, H, W, I*M]
 
     const std::vector<float> biasQuantScales{ 0.5f, 0.25f, 0.5f, 0.25f };
     constexpr unsigned int biasQuantDimension = 0;
-    TensorInfo biasInfo({ 4 }, biasType, biasQuantScales, biasQuantDimension);
+    TensorInfo biasInfo({ 4 }, biasType, biasQuantScales, biasQuantDimension, true);
 
     std::vector<uint8_t> inputData =
     {
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index 35496ce..4b97636 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -111,10 +111,12 @@
     armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
     weightsDesc.SetQuantizationScale(0.2f);
     weightsDesc.SetQuantizationOffset(93);
+    weightsDesc.SetConstant(constantWeights);
 
     armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
     biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
     biasesDesc.SetQuantizationOffset(0);
+    biasesDesc.SetConstant(true);
 
     LayerTestResult<T, 2> result(outputTensorInfo);
 
@@ -183,6 +185,9 @@
     unsigned int outputChannels = 1;
     unsigned int outputNum = 1;
 
+    bool isBiasEnabled = true;
+    bool isConstantWeights = true;
+
     // Define the tensor descriptors.
     armnn::TensorInfo inputTensorInfo;
     armnn::TensorInfo outputTensorInfo;
@@ -203,6 +208,8 @@
     outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
     weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
     biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
+    weightsDesc.SetConstant(isConstantWeights);
+    biasesDesc.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -236,7 +243,7 @@
         inputTensorInfo, outputTensorInfo,
         weightsDesc, biasesDesc,
         weights, biasValues, input,
-        true, transposeWeights, true
+        isBiasEnabled, transposeWeights, isConstantWeights
     );
 
     result.m_ExpectedData = armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset);
@@ -283,6 +290,8 @@
     unsigned int outputChannels = 3;
     unsigned int outputNum = 2;
 
+    bool isConstantWeights = true;
+
     // Define the tensor descriptors.
     armnn::TensorInfo inputTensorInfo;
     armnn::TensorInfo outputTensorInfo;
@@ -304,6 +313,8 @@
     outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
     weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
     biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
+    weightsDesc.SetConstant(isConstantWeights);
+    biasesDesc.SetConstant(true);
 
     LayerTestResult<float, 2> result(outputTensorInfo);
 
@@ -345,7 +356,7 @@
         inputTensorInfo, outputTensorInfo,
         weightsDesc, biasesDesc,
         weights, biasValues, input,
-        biasEnabled, transposeWeights, true
+        biasEnabled, transposeWeights, isConstantWeights
     );
 
     std::vector<float> expectedOutput =