IVGCVSW-7454 Enable NonConstWeights in GpuAcc

* Set the constant flag for weights and bias on the ACL TensorInfo in the ACL workloads
* Set the constant flag for weights and bias in the unit tests
* Add the constantWeights flag to the dot file for the FullyConnected layer
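
For reference, a minimal sketch of the pattern applied in the CL workloads below
(propagating the Arm NN constness flag through to the ACL tensor info; the names
shown are the ones used in this change):

    arm_compute::ICLTensor& weights =
        PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    weights.info()->set_are_values_constant(info.m_InputTensorInfos[1].IsConstant());

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        arm_compute::ICLTensor& bias =
            PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
        bias.info()->set_are_values_constant(info.m_InputTensorInfos[2].IsConstant());
        // Dynamic (non-constant) bias is not supported
        ARMNN_ASSERT(info.m_InputTensorInfos[2].IsConstant());
    }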

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I87e1fef516ce4a8a59245dfdf7d92c153418e1d6
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index 8b0644b..a0e5a1e 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -213,7 +213,7 @@
     auto clCapabilities = clBackend->GetCapabilities();
 
     CapabilityTestHelper(clCapabilities,
-                         {{"NonConstWeights", false},
+                         {{"NonConstWeights", true},
                           {"AsyncExecution", false},
                           {"ProtectedContentAllocation", true},
                           {"ConstantTensorsAsInputs", true},
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 8a2d8c8..69a04df 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -246,7 +246,9 @@
             armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
     armnn::TensorInfo kernelDesc =
             armnnUtils::GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout, ArmnnType);
+    kernelDesc.SetConstant(true);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
+    biasDesc.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -412,10 +414,11 @@
 
     // Creates the tensors.
     armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
-    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
-                                       OutType);
+    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels}, OutType);
     armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
+    kernelDesc.SetConstant(true);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
+    biasDesc.SetConstant(true);
 
     // Construct the input data.
     std::vector<T> inputData;
@@ -432,11 +435,6 @@
     std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
     std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
 
-//    armnn::ScopedTensorHandle weightsTensor(kernelDesc);
-//    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
-
-//    armnn::ScopedTensorHandle biasTensor(biasDesc);
-
     armnn::Convolution2dQueueDescriptor data;
 
     data.m_Parameters.m_StrideX = strideX;
@@ -512,7 +510,9 @@
     armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, ArmnnType);
     armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, ArmnnType);
     armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, ArmnnType);
+    kernelInfo.SetConstant(true);
     armnn::TensorInfo biasInfo({outputChannels}, ArmnnBType);
+    biasInfo.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -578,11 +578,6 @@
 
     armnn::Convolution2dQueueDescriptor data;
     armnn::WorkloadInfo info;
-//    armnn::ScopedTensorHandle weightsTensor(kernelInfo);
-//    armnn::ScopedTensorHandle biasTensor(biasInfo);
-//
-//    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
-//    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
 
     AddInputToWorkload(data, info, inputInfo, inputHandle.get());
     AddInputToWorkload(data, info, kernelInfo, weightsHandle.get());
@@ -1390,7 +1385,9 @@
     inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
     outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
     kernelDesc = armnn::TensorInfo(4, kernelShape, ArmnnType);
+    kernelDesc.SetConstant(true);
     biasDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
+    biasDesc.SetConstant(true);
 
     auto input  = MakeRandomTensor<T>(inputTensorInfo, 124908);
     auto kernel = MakeRandomTensor<T>(kernelDesc, 891234);
@@ -1730,7 +1727,9 @@
     armnn::TensorInfo outputTensorInfo =
             armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
     armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
+    kernelDesc.SetConstant(true);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
+    biasDesc.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if (armnn::IsQuantizedType<T>())
@@ -1874,9 +1873,10 @@
             armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
     armnn::TensorInfo outputTensorInfo =
             armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
-    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels},
-                                 ArmnnType);
+    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels}, ArmnnType);
+    kernelDesc.SetConstant(true);
     armnn::TensorInfo biasDesc({ outputChannels }, ArmnnBType);
+    biasDesc.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -2047,9 +2047,10 @@
             inputBatchSize, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
     armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
             outputBatchSize, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
-    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels},
-                                 ArmnnType);
+    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels}, ArmnnType);
+    kernelDesc.SetConstant(true);
     armnn::TensorInfo biasDesc({outputChannels}, ArmnnBType);
+    biasDesc.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -2291,8 +2292,9 @@
 
     // Kernel must be NCHW layout always, independently of the layout of the input and output for depthwise convolution.
     armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
-
+    kernelDesc.SetConstant(true);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
+    biasDesc.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -3084,8 +3086,8 @@
 
     inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset);
     outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset);
-    kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset);
-    biasDesc = armnn::TensorInfo(1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset);
+    kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset, true);
+    biasDesc = armnn::TensorInfo(1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset, true);
 
     auto input  = MakeRandomTensor<T>(inputTensorInfo, 124908, 0.0f, 255.0f);
     auto kernel = MakeRandomTensor<T>(kernelDesc, 891234, 0.0f, 255.0f);
@@ -3575,10 +3577,10 @@
     const std::vector<float> quantScales{ 0.5f, 0.75f, 1.0f };
     constexpr unsigned int quantDimension = 0;
 
-    TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension);
+    TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension, true);
 
     const std::vector<float> biasQuantScales{ 0.25f, 0.375f, 0.5f };
-    TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension);
+    TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension, true);
 
     std::vector<uint8_t> inputData =
     {
@@ -3625,11 +3627,6 @@
     std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
 
     WorkloadInfo workloadInfo;
-//    ScopedTensorHandle weightTensor(kernelInfo);
-//    ScopedTensorHandle biasTensor(biasInfo);
-//
-//    AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
-//    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
 
     Convolution2dQueueDescriptor queueDescriptor;
     queueDescriptor.m_Parameters = descriptor;
@@ -3843,11 +3840,11 @@
 
     const std::vector<float> quantScales{ 1.0f, 0.5f, 1.0f, 0.5f };
     const unsigned int quantDimension = 3;
-    TensorInfo kernelInfo({ 1, 2, 2, 4 }, kernelType, quantScales, quantDimension); // [1, H, W, I*M]
+    TensorInfo kernelInfo({ 1, 2, 2, 4 }, kernelType, quantScales, quantDimension, true); // [1, H, W, I*M]
 
     const std::vector<float> biasQuantScales{ 0.5f, 0.25f, 0.5f, 0.25f };
     constexpr unsigned int biasQuantDimension = 0;
-    TensorInfo biasInfo({ 4 }, biasType, biasQuantScales, biasQuantDimension);
+    TensorInfo biasInfo({ 4 }, biasType, biasQuantScales, biasQuantDimension, true);
 
     std::vector<uint8_t> inputData =
     {
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index 35496ce..4b97636 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -111,10 +111,12 @@
     armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
     weightsDesc.SetQuantizationScale(0.2f);
     weightsDesc.SetQuantizationOffset(93);
+    weightsDesc.SetConstant(constantWeights);
 
     armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
     biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
     biasesDesc.SetQuantizationOffset(0);
+    biasesDesc.SetConstant(true);
 
     LayerTestResult<T, 2> result(outputTensorInfo);
 
@@ -183,6 +185,9 @@
     unsigned int outputChannels = 1;
     unsigned int outputNum = 1;
 
+    bool isBiasEnabled = true;
+    bool isConstantWeights = true;
+
     // Define the tensor descriptors.
     armnn::TensorInfo inputTensorInfo;
     armnn::TensorInfo outputTensorInfo;
@@ -203,6 +208,8 @@
     outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
     weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
     biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
+    weightsDesc.SetConstant(isConstantWeights);
+    biasesDesc.SetConstant(true);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -236,7 +243,7 @@
         inputTensorInfo, outputTensorInfo,
         weightsDesc, biasesDesc,
         weights, biasValues, input,
-        true, transposeWeights, true
+        isBiasEnabled, transposeWeights, isConstantWeights
     );
 
     result.m_ExpectedData = armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset);
@@ -283,6 +290,8 @@
     unsigned int outputChannels = 3;
     unsigned int outputNum = 2;
 
+    bool isConstantWeights = true;
+
     // Define the tensor descriptors.
     armnn::TensorInfo inputTensorInfo;
     armnn::TensorInfo outputTensorInfo;
@@ -304,6 +313,8 @@
     outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
     weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
     biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
+    weightsDesc.SetConstant(isConstantWeights);
+    biasesDesc.SetConstant(true);
 
     LayerTestResult<float, 2> result(outputTensorInfo);
 
@@ -345,7 +356,7 @@
         inputTensorInfo, outputTensorInfo,
         weightsDesc, biasesDesc,
         weights, biasValues, input,
-        biasEnabled, transposeWeights, true
+        biasEnabled, transposeWeights, isConstantWeights
     );
 
     std::vector<float> expectedOutput =
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index ce56c30..d276eac 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -24,7 +24,7 @@
 // add new capabilities here..
 const BackendCapabilities gpuAccCapabilities("GpuAcc",
                                              {
-                                                     {"NonConstWeights", false},
+                                                     {"NonConstWeights", true},
                                                      {"AsyncExecution", false},
                                                      {"ProtectedContentAllocation", true},
                                                      {"ConstantTensorsAsInputs", true},
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index c49ca23..51ea0dc 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -410,10 +410,10 @@
     const std::vector<float> quantScales{ 0.5f, 0.75f, 1.0f };
     constexpr unsigned int quantDimension = 0;
 
-    TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension);
+    TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension, true);
 
     const std::vector<float> biasQuantScales{ 0.25f, 0.375f, 0.5f };
-    TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension);
+    TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension, true);
 
     std::vector<uint8_t> inputData =
     {
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 1920f2d..d6a72e6 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -88,9 +88,15 @@
     arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
     arm_compute::ICLTensor& weights = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    weights.info()->set_are_values_constant(info.m_InputTensorInfos[1].IsConstant());
+
     if (m_Data.m_Parameters.m_BiasEnabled)
     {
         arm_compute::ICLTensor& bias = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
+        bias.info()->set_are_values_constant(info.m_InputTensorInfos[2].IsConstant());
+        // We do not support dynamic bias
+        ARMNN_ASSERT(info.m_InputTensorInfos[2].IsConstant() == true);
+
         m_BiasProxy = std::make_unique<ICLTensorProxy>(&bias);
     }
 
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 041cb8b..e6c9cb5 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -91,28 +91,12 @@
     const arm_compute::CLCompileContext& clCompileContext)
     : ClBaseWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info)
 {
-    // Add details for profiling output
-    WorkloadInfo detailsInfo;
-
-    detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
-    detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
-    detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
-    if (descriptor.m_Parameters.m_BiasEnabled)
-    {
-        detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
-    }
-
-    // Report Profiling Details
-    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClDepthwiseConvolutionWorkload_Construct",
-                                         descriptor.m_Parameters,
-                                         detailsInfo,
-                                         GetGuid());
-
     m_Data.ValidateInputsOutputs("ClDepthwiseConv2dWorkload", descriptor.m_Parameters.GetNumInputs(), 1);
 
     arm_compute::ICLTensor& input = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
     arm_compute::ICLTensor& weights = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    weights.info()->set_are_values_constant(info.m_InputTensorInfos[1].IsConstant());
     arm_compute::ITensorInfo* weightsInfo = weights.info();
     arm_compute::ITensorInfo* inputInfo = input.info();
     auto weightsShape = weightsInfo->tensor_shape();
@@ -127,6 +111,9 @@
     if (m_Data.m_Parameters.m_BiasEnabled)
     {
         bias = &PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
+        bias->info()->set_are_values_constant(info.m_InputTensorInfos[2].IsConstant());
+        // We do not support dynamic bias
+        ARMNN_ASSERT(info.m_InputTensorInfos[2].IsConstant() == true);
     }
 
     const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
@@ -158,6 +145,24 @@
                 aclDilationInfo);
     }
     ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
+
+    // Add details for profiling output
+    WorkloadInfo detailsInfo;
+
+    detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
+    detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
+    detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
+
+    if (descriptor.m_Parameters.m_BiasEnabled)
+    {
+        detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
+    }
+
+    // Report Profiling Details
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClDepthwiseConvolutionWorkload_Construct",
+                                         descriptor.m_Parameters,
+                                         detailsInfo,
+                                         GetGuid());
 }
 
 void ClDepthwiseConvolutionWorkload::Execute() const
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index 22df04f..1f26b09 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -46,6 +46,7 @@
 
     const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
         ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor, activationDescriptor);
+
     return arm_compute::CLFullyConnectedLayer::validate(&aclInput,
                                                         &aclWeights,
                                                         optionalAclBiases,
@@ -60,34 +61,22 @@
     const arm_compute::CLCompileContext& clCompileContext)
     : ClBaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info), m_FullyConnectedLayer(memoryManager)
 {
-    // Add details for profiling output
-    WorkloadInfo detailsInfo;
+    m_Data.ValidateInputsOutputs("ClFullyConnectedWorkload", descriptor.m_Parameters.GetNumInputs(), 1);
 
-    detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
-    detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
-    detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
-    if (descriptor.m_Parameters.m_BiasEnabled)
-    {
-        detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
-    }
-
-    // Report Profiling Details
-    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClFullyConnectedWorkload_Construct",
-                                         descriptor.m_Parameters,
-                                         detailsInfo,
-                                         this->GetGuid());
-
-    m_Data.ValidateInputsOutputs("ClFullyConnectedWorkload", descriptor.m_Parameters.GetNumInputs(),
-                                 1);
-
-    arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ICLTensor& input = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& output = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
     arm_compute::ICLTensor& weights = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
 
+    weights.info()->set_are_values_constant(info.m_InputTensorInfos[1].IsConstant());
+
     arm_compute::ICLTensor* bias  = nullptr;
     if (m_Data.m_Parameters.m_BiasEnabled)
     {
         bias = &PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
+        bias->info()->set_are_values_constant(info.m_InputTensorInfos[2].IsConstant());
+
+        // We do not support dynamic bias
+        ARMNN_ASSERT(info.m_InputTensorInfos[2].IsConstant() == true);
     }
 
     const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
@@ -105,6 +94,23 @@
                                         &output,
                                         fc_info);
     }
+
+    // Add details for profiling output
+    WorkloadInfo detailsInfo;
+
+    detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
+    detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
+    detailsInfo.m_WeightsTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[1]);
+    if (descriptor.m_Parameters.m_BiasEnabled)
+    {
+        detailsInfo.m_BiasTensorInfo = armnn::Optional<armnn::TensorInfo>(info.m_InputTensorInfos[2]);
+    }
+
+    // Report Profiling Details
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClFullyConnectedWorkload_Construct",
+                                         descriptor.m_Parameters,
+                                         detailsInfo,
+                                         this->GetGuid());
 }
 
 void ClFullyConnectedWorkload::Execute() const