IVGCVSW-6119 ConstTensorsAsInput: FullyConnected

 * Constant weights and biases are now stored as Constant layers.
 * Updated Serializer, Deserializer and unit tests to reflect this.
 * Updated TfLiteDelegate, TfLiteParser and OnnxParser.
 * Updated Schema with IsConstant and ConstantTensorsAsInputs.
 * Updated Ref backend to handle constant weights and
   bias as inputs rather than reading from member variables.
 * Added dynamic or constant input EndToEnd tests.

!android-nn-driver:5959

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ibf3cf437df1100e4b322b0d303c575c6339f9696
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 3fe0823..319cdb1 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1041,15 +1041,12 @@
 {
     const std::string descriptorName{"FullyConnectedQueueDescriptor"};
 
-    uint32_t numInputs = 1;
-    if (!m_Parameters.m_ConstantWeights)
+    uint32_t numInputs = 2;
+    if (m_Parameters.m_BiasEnabled)
     {
-        numInputs = 2;
-        if (m_Parameters.m_BiasEnabled)
-        {
-            numInputs = 3;
-        }
+        numInputs = 3;
     }
+
     ValidateNumInputs(workloadInfo, descriptorName, numInputs);
     ValidateNumOutputs(workloadInfo, descriptorName, 1);
 
@@ -1063,30 +1060,12 @@
         throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
     }
 
-    TensorInfo weightTensorInfo;
-    if (m_Parameters.m_ConstantWeights)
-    {
-        ValidatePointer(m_Weight, descriptorName, "weight");
-        weightTensorInfo = m_Weight->GetTensorInfo();
-    }
-    else
-    {
-        weightTensorInfo  = workloadInfo.m_InputTensorInfos[1];
-    }
+    TensorInfo weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
     ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");
 
     if (m_Parameters.m_BiasEnabled)
     {
-        TensorInfo biasTensorInfo;
-        if (m_Parameters.m_ConstantWeights)
-        {
-            ValidatePointer(m_Bias, descriptorName, "bias");
-            biasTensorInfo = m_Bias->GetTensorInfo();
-        }
-        else
-        {
-            biasTensorInfo  = workloadInfo.m_InputTensorInfos[2];
-        }
+        TensorInfo biasTensorInfo = workloadInfo.m_InputTensorInfos[2];
         // Validates type and quantization values.
         ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
         ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
@@ -1894,11 +1873,9 @@
     };
 
     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
-
-    if (inputTensorInfo != outputTensorInfo)
-    {
-        throw InvalidArgumentException(descriptorName + ": Input and output tensor infos do not match.");
-    }
+    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+    ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
 }
 
 void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 1c18551..3f5972d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -36,7 +36,11 @@
         return info;
     }
 
-    return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
+    return TensorInfo(info.GetShape(),
+                      type.value(),
+                      info.GetQuantizationScale(),
+                      info.GetQuantizationOffset(),
+                      info.IsConstant());
 }
 
 } // anonymous namespace
@@ -364,16 +368,7 @@
             TensorInfo weightsInfo;
             const TensorInfo* weightsInfoPtr = nullptr;
 
-            if (descriptor.m_ConstantWeights)
-            {
-                ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
-                weightsInfo = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
-            }
-            else
-            {
-                weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType);
-
-            }
+            weightsInfo = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(), dataType);
             weightsInfoPtr = &weightsInfo;
 
             TensorInfo biasInfo;
@@ -385,17 +380,8 @@
 
             if (descriptor.m_BiasEnabled)
             {
-                if(descriptor.m_ConstantWeights)
-                {
-                    ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
-                    biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
-                    biasInfoPtr = &biasInfo;
-                }
-                else
-                {
-                    biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType);
-                    biasInfoPtr = &biasInfo;
-                }
+                biasInfo = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(), dataType);
+                biasInfoPtr = &biasInfo;
             }
             else
             {
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index 923d6f3..af6b568 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -28,10 +28,7 @@
 
     armnn::IConnectableLayer* inputLayer  = network->AddInputLayer(0, "Input");
     armnn::IConnectableLayer* weightsInputLayer   = network->AddInputLayer(1, "Weights_Input");
-    armnn::IConnectableLayer* fullyConnectedLayer = network->AddFullyConnectedLayer(descriptor,
-                                                                                    armnn::EmptyOptional(),
-                                                                                    armnn::EmptyOptional(),
-                                                                                    "Fully_Connected");
+    armnn::IConnectableLayer* fullyConnectedLayer = network->AddFullyConnectedLayer(descriptor, "Fully_Connected");
     armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "Output");
 
     Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
@@ -41,6 +38,52 @@
     return network;
 }
 
+armnn::INetworkPtr CreateFullyConnectedNetworkNonConstWeightsConstBias(const armnn::TensorInfo& inputTensorInfo,
+                                                                       const armnn::TensorInfo& outputTensorInfo,
+                                                                       const armnn::TensorInfo& weightsTensorInfo,
+                                                                       const armnn::TensorInfo& biasTensorInfo,
+                                                                       const armnn::ConstTensor& biasConstantTensor,
+                                                                       armnn::FullyConnectedDescriptor descriptor)
+{
+    armnn::INetworkPtr network(armnn::INetwork::Create());
+
+    armnn::IConnectableLayer* inputLayer  = network->AddInputLayer(0, "Input");
+    armnn::IConnectableLayer* weightsInputLayer   = network->AddInputLayer(1, "Weights_Input");
+    armnn::IConnectableLayer* biasLayer  = network->AddConstantLayer(biasConstantTensor, "Bias");
+    armnn::IConnectableLayer* fullyConnectedLayer = network->AddFullyConnectedLayer(descriptor, "Fully_Connected");
+    armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "Output");
+
+    Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
+    Connect(weightsInputLayer, fullyConnectedLayer, weightsTensorInfo, 0, 1);
+    Connect(biasLayer, fullyConnectedLayer, biasTensorInfo, 0, 2);
+    Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
+
+    return network;
+}
+
+armnn::INetworkPtr CreateFullyConnectedNetworkConstWeightsNonConstBias(const armnn::TensorInfo& inputTensorInfo,
+                                                                       const armnn::TensorInfo& outputTensorInfo,
+                                                                       const armnn::TensorInfo& weightsTensorInfo,
+                                                                       const armnn::TensorInfo& biasTensorInfo,
+                                                                       const armnn::ConstTensor& weightsConstantTensor,
+                                                                       armnn::FullyConnectedDescriptor descriptor)
+{
+    armnn::INetworkPtr network(armnn::INetwork::Create());
+
+    armnn::IConnectableLayer* inputLayer  = network->AddInputLayer(0, "Input");
+    armnn::IConnectableLayer* weightsLayer  = network->AddConstantLayer(weightsConstantTensor, "Weights");
+    armnn::IConnectableLayer* biasLayer   = network->AddInputLayer(2, "Bias_Input");
+    armnn::IConnectableLayer* fullyConnectedLayer = network->AddFullyConnectedLayer(descriptor, "Fully_Connected");
+    armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "Output");
+
+    Connect(inputLayer, fullyConnectedLayer, inputTensorInfo, 0, 0);
+    Connect(weightsLayer, fullyConnectedLayer, weightsTensorInfo, 0, 1);
+    Connect(biasLayer, fullyConnectedLayer, biasTensorInfo, 0, 2);
+    Connect(fullyConnectedLayer, outputLayer, outputTensorInfo, 0, 0);
+
+    return network;
+}
+
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 void FullyConnectedWithDynamicWeightsEndToEnd(const std::vector<armnn::BackendId>& backends)
 {
@@ -94,4 +137,123 @@
                                                 backends,
                                                 1.0f);
 }
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void FullyConnectedWithDynamicOrConstantInputsEndToEnd(const std::vector<armnn::BackendId>& backends,
+                                                       const bool transposeWeights,
+                                                       const bool constantWeightsOrBias)
+{
+    unsigned int inputWidth = 1;
+    unsigned int inputHeight = 1;
+    unsigned int inputChannels = 5;
+    unsigned int inputNum = 2;
+
+    unsigned int outputChannels = 3;
+    unsigned int outputNum = 2;
+
+    unsigned int inputShape[]   = { inputNum, inputChannels, inputHeight, inputWidth };
+    unsigned int outputShape[]  = { outputNum, outputChannels };
+    unsigned int weightsShape[] = { inputChannels, outputChannels };
+
+    if (transposeWeights)
+    {
+        std::swap(weightsShape[0], weightsShape[1]);
+    }
+
+    unsigned int biasShape[] = { outputChannels };
+
+    armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
+    armnn::TensorInfo weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
+    armnn::TensorInfo biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
+
+    std::vector<float> input =
+    {
+        1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
+        5.0f, 4.0f, 3.0f, 2.0f, 1.0f
+    };
+
+    std::vector<float> weights =
+    {
+        .5f, 2.f, .5f,
+        .5f, 2.f, 1.f,
+        .5f, 2.f, 2.f,
+        .5f, 2.f, 3.f,
+        .5f, 2.f, 4.f
+    };
+
+    if (transposeWeights)
+    {
+        weights =
+        {
+            .5f, .5f, .5f, .5f, .5f,
+            2.f, 2.f, 2.f, 2.f, 2.f,
+            .5f, 1.f, 2.f, 3.f, 4.f
+        };
+    }
+
+    std::vector<float> biasValues = std::vector<float>({10.f, 20.f, 30.f});
+
+    std::vector<float> expectedOutput =
+    {
+        0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
+        2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
+        0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
+
+        2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
+        10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
+        2.5f + 4.0f + 6.0f + 6.f + 4.f   + biasValues[2]
+    };
+
+    FullyConnectedDescriptor descriptor;
+    descriptor.m_BiasEnabled = true;
+    descriptor.m_TransposeWeightMatrix = transposeWeights;
+    descriptor.m_ConstantWeights = constantWeightsOrBias;
+
+    if (!constantWeightsOrBias)
+    {
+        // Tests non constant weights and constant bias.
+        ConstTensor biasConstantTensor(biasesDesc, biasValues.data());
+
+        armnn::INetworkPtr network = CreateFullyConnectedNetworkNonConstWeightsConstBias(inputTensorInfo,
+                                                                                         outputTensorInfo,
+                                                                                         weightsDesc,
+                                                                                         biasesDesc,
+                                                                                         biasConstantTensor,
+                                                                                         descriptor);
+        CHECK(network);
+
+        std::map<int, std::vector<T>> inputTensorData    = {{ 0, input }, {1, weights}};
+        std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutput }};
+
+        EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(network),
+                                                    inputTensorData,
+                                                    expectedOutputTensorData,
+                                                    backends,
+                                                    1.0f);
+    }
+    else
+    {
+        // Tests constant weights and non constant bias.
+        ConstTensor weightsConstantTensor(weightsDesc, weights.data());
+
+        armnn::INetworkPtr network = CreateFullyConnectedNetworkConstWeightsNonConstBias(inputTensorInfo,
+                                                                                         outputTensorInfo,
+                                                                                         weightsDesc,
+                                                                                         biasesDesc,
+                                                                                         weightsConstantTensor,
+                                                                                         descriptor);
+        CHECK(network);
+
+        std::map<int, std::vector<T>> inputTensorData    = {{ 0, input }, {2, biasValues}};
+        std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutput }};
+
+        EndToEndLayerTestImpl<ArmnnType, ArmnnType>(move(network),
+                                                    inputTensorData,
+                                                    expectedOutputTensorData,
+                                                    backends,
+                                                    1.0f);
+    }
+}
+
 } // anonymous namespace
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index c47048e..dcf87fe 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -22,56 +22,6 @@
 
 template<typename T, typename B>
 LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        const armnn::ITensorHandleFactory& tensorHandleFactory,
-        armnn::TensorInfo inputTensorInfo,
-        armnn::TensorInfo outputTensorInfo,
-        armnn::TensorInfo weightsDesc,
-        armnn::TensorInfo biasesDesc,
-        std::vector<T>& weights,
-        std::vector<B>& bias,
-        std::vector<T>& input,
-        bool biasEnabled,
-        bool transposeWeights)
-{
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::FullyConnectedQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    armnn::ScopedTensorHandle weightsTensor(weightsDesc);
-    armnn::ScopedTensorHandle biasTensor(biasesDesc);
-
-    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
-
-    AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.data());
-    AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
-
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-    data.m_Weight = &weightsTensor;
-    data.m_Bias = &biasTensor;
-    data.m_Parameters.m_BiasEnabled = biasEnabled;
-    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
-    LayerTestResult<T, 2> result(outputTensorInfo);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), input.data());
-
-    ExecuteWorkload(*workload, memoryManager);
-
-    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
-    result.m_ActualData = actualOutput;
-
-    return result;
-}
-
-template<typename T, typename B>
-LayerTestResult<T, 2> SimpleFullyConnectedTestWeightsAsInputsImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::ITensorHandleFactory& tensorHandleFactory,
@@ -83,7 +33,8 @@
     std::vector<B>& bias,
     std::vector<T>& input,
     bool biasEnabled,
-    bool transposeWeights)
+    bool transposeWeights,
+    bool constantWeights)
 {
     std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(weightsTensorInfo);
@@ -93,13 +44,23 @@
 
     armnn::FullyConnectedQueueDescriptor data;
     armnn::WorkloadInfo info;
+    armnn::ScopedTensorHandle weightsTensor(weightsTensorInfo);
+    armnn::ScopedTensorHandle biasTensor(biasesTensorInfo);
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.data());
+    AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
 
     AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
     AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    // Need to set as layer members will be null when creating the workload because the optimization hasn't been run.
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor;
+
     data.m_Parameters.m_BiasEnabled = biasEnabled;
     data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
-    data.m_Parameters.m_ConstantWeights = false;
+    data.m_Parameters.m_ConstantWeights = constantWeights;
 
     std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
     if (biasEnabled)
@@ -180,36 +141,19 @@
 
     std::vector<int32_t> bias = {9250, 67500};
 
-    if (constantWeights)
-    {
-        result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
-                                                 memoryManager,
-                                                 tensorHandleFactory,
-                                                 inputTensorInfo,
-                                                 outputTensorInfo,
-                                                 weightsDesc,
-                                                 biasesDesc,
-                                                 weights,
-                                                 bias,
-                                                 input,
-                                                 biasEnabled,
-                                                 true);
-    }
-    else
-    {
-        result = SimpleFullyConnectedTestWeightsAsInputsImpl<T>(workloadFactory,
-                                                 memoryManager,
-                                                 tensorHandleFactory,
-                                                 inputTensorInfo,
-                                                 outputTensorInfo,
-                                                 weightsDesc,
-                                                 biasesDesc,
-                                                 weights,
-                                                 bias,
-                                                 input,
-                                                 biasEnabled,
-                                                 true);
-    }
+    result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
+                                             memoryManager,
+                                             tensorHandleFactory,
+                                             inputTensorInfo,
+                                             outputTensorInfo,
+                                             weightsDesc,
+                                             biasesDesc,
+                                             weights,
+                                             bias,
+                                             input,
+                                             biasEnabled,
+                                             true,
+                                             constantWeights);
 
     if (biasEnabled)
     {
@@ -299,7 +243,7 @@
         inputTensorInfo, outputTensorInfo,
         weightsDesc, biasesDesc,
         weights, biasValues, input,
-        true, transposeWeights
+        true, transposeWeights, true
     );
 
     result.m_ExpectedData = armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset);
@@ -408,7 +352,7 @@
         inputTensorInfo, outputTensorInfo,
         weightsDesc, biasesDesc,
         weights, biasValues, input,
-        biasEnabled, transposeWeights
+        biasEnabled, transposeWeights, true
     );
 
     std::vector<float> expectedOutput =
diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp
index 441f4eb..2855957 100644
--- a/src/backends/reference/RefBackend.hpp
+++ b/src/backends/reference/RefBackend.hpp
@@ -12,7 +12,8 @@
 const BackendCapabilities cpuRefCapabilities("CpuRef",
                                              {
                                                     {"NonConstWeights", true},
-                                                    {"AsyncExecution", true}
+                                                    {"AsyncExecution", true},
+                                                    {"ConstantTensorsAsInputs", true}
                                              });
 
 const std::set<armnn::BackendCapability> oldCpuRefCapabilities {
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 4293ef5..fae8d0c 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -486,6 +486,24 @@
         TensorInfo({ 3, 7 }, armnn::DataType::Float32, outputQScale));
 }
 
+TEST_CASE("CreateFullyConnectedWorkloadWeightsBiasesAsInputsFloat32")
+{
+    Graph graph;
+    RefWorkloadFactory factory = GetFactory();
+
+    auto workload =
+            CreateFullyConnectedWorkloadWeightsBiasesAsInputsTest<RefFullyConnectedWorkload,
+                                                                  armnn::DataType::Float32>(factory, graph);
+
+    // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
+    float inputsQScale = 0.0f;
+    float outputQScale = 0.0f;
+    CheckInputsOutput(std::move(workload),
+                      TensorInfo({ 3, 1, 4, 5 }, armnn::DataType::Float32, inputsQScale),
+                      TensorInfo({ 7, 20 }, armnn::DataType::Float32, inputsQScale),
+                      TensorInfo({ 3, 7 }, armnn::DataType::Float32, outputQScale));
+}
+
 template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
 static void RefCreateFullyConnectedWorkloadTest()
 {
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 69a2048..424df97 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -600,11 +600,21 @@
     FillEndToEnd<armnn::DataType::Signed32>(defaultBackends);
 }
 
-TEST_CASE("RefFullyConnectedEndToEndTestInt32")
+TEST_CASE("RefFullyConnectedEndToEndTestFloat32")
 {
     FullyConnectedWithDynamicWeightsEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
+TEST_CASE("RefFullyConnectedEndToEndTestNonConstantWeightsConstantBiasesFloat32")
+{
+    FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(defaultBackends, true, false);
+}
+
+TEST_CASE("RefFullyConnectedEndToEndTestConstantWeightsNonConstantBiasesFloat32")
+{
+    FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(defaultBackends, true, true);
+}
+
 TEST_CASE("RefGatherFloatTest")
 {
     GatherEndToEnd<armnn::DataType::Float32>(defaultBackends);
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index 99e3eab..5a7951e 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -16,20 +16,6 @@
     const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info)
         : BaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
 {
-    if (descriptor.m_Parameters.m_ConstantWeights)
-    {
-        m_Weight = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Weight));
-        const TensorInfo& rWeightInfo = m_Weight->GetTensorInfo();
-        m_WeightShape = rWeightInfo.GetShape();
-        m_WeightDecoder = MakeDecoder<float>(rWeightInfo, m_Weight->Map(true));
-
-        if (descriptor.m_Parameters.m_BiasEnabled)
-        {
-            m_Bias = std::make_unique<ScopedTensorHandle>(*(descriptor.m_Bias));
-            const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
-            m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
-        }
-    }
 }
 
 void RefFullyConnectedWorkload::PostAllocationConfigure()
@@ -44,18 +30,15 @@
     ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
     m_InputShape = inputInfo.GetShape();
 
-    if (!m_Data.m_Parameters.m_ConstantWeights)
-    {
-        const TensorInfo& rWeightInfo = GetTensorInfo(inputs[1]);
-        ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
-        m_WeightShape = rWeightInfo.GetShape();
-        m_WeightDecoder = MakeDecoder<float>(rWeightInfo);
+    const TensorInfo& rWeightInfo = GetTensorInfo(inputs[1]);
+    ARMNN_ASSERT(rWeightInfo.GetNumDimensions() > 1);
+    m_WeightShape = rWeightInfo.GetShape();
+    m_WeightDecoder = MakeDecoder<float>(rWeightInfo);
 
-        if (m_Data.m_Parameters.m_BiasEnabled)
-        {
-            const TensorInfo& biasInfo = GetTensorInfo(inputs[2]);
-            m_BiasDecoder = MakeDecoder<float>(biasInfo);
-        }
+    if (m_Data.m_Parameters.m_BiasEnabled)
+    {
+        const TensorInfo& biasInfo = GetTensorInfo(inputs[2]);
+        m_BiasDecoder = MakeDecoder<float>(biasInfo);
     }
 
     const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
@@ -87,13 +70,10 @@
     std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
     std::unique_ptr<Encoder<float>> OutputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
 
-    if (!m_Data.m_Parameters.m_ConstantWeights)
+    m_WeightDecoder->Reset(inputs[1]->Map());
+    if (m_Data.m_Parameters.m_BiasEnabled)
     {
-        m_WeightDecoder->Reset(inputs[1]->Map());
-        if (m_Data.m_Parameters.m_BiasEnabled)
-        {
-            m_BiasDecoder->Reset(inputs[2]->Map());
-        }
+        m_BiasDecoder->Reset(inputs[2]->Map());
     }
 
     FullyConnected(m_InputShape,