Handle optional biases better in Neon/Cl FullyConnected workloads

Use armnn::Optional for the optional bias TensorInfo, mirroring what is
already done in the Convolution workloads.

Fixes test failures found with -fsanitize=undefined, caused by reading the
bias TensorInfo even when no bias was present.

Change-Id: I7b887e63e2ffab14aeab14415069be738d938ebb
Signed-off-by: Matthew Bentham <matthew.bentham@arm.com>
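
For context, a minimal standalone sketch (not part of the patch) of the pattern the change adopts: the bias TensorInfo is wrapped in an armnn::Optional that is only populated when the descriptor enables a bias, and is only dereferenced behind that same check. ValidateFullyConnected is a hypothetical stand-in for the Cl/Neon *FullyConnectedWorkloadValidate functions.

```cpp
// Sketch of the optional-bias pattern, assuming only Arm NN's public headers.
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <cassert>

using namespace armnn;

// Hypothetical stand-in for ClFullyConnectedWorkloadValidate /
// NeonFullyConnectedWorkloadValidate.
bool ValidateFullyConnected(const TensorInfo& input,
                            const TensorInfo& weights,
                            const Optional<TensorInfo>& biases,
                            const FullyConnectedDescriptor& descriptor)
{
    if (descriptor.m_BiasEnabled)
    {
        // Only touch the bias info when the descriptor promises one; this is
        // what avoids reading an unset bias TensorInfo.
        assert(biases.has_value());
        return biases.value().GetNumDimensions() == 1;
    }
    return true;
}

int main()
{
    TensorInfo input({1, 16}, DataType::Float32);
    TensorInfo weights({16, 8}, DataType::Float32);

    FullyConnectedDescriptor descriptor;
    descriptor.m_BiasEnabled = false;

    // With bias disabled the Optional stays empty instead of the caller
    // unconditionally fetching a bias TensorInfo that was never set.
    Optional<TensorInfo> biases;
    if (descriptor.m_BiasEnabled)
    {
        biases = TensorInfo({8}, DataType::Float32);
    }

    return ValidateFullyConnected(input, weights, biases, descriptor) ? 0 : 1;
}
```

This mirrors the caller-side change in ClBackend.cpp and NeonBackend.cpp below: build the Optional from baseLayer->m_Bias only when m_BiasEnabled is set, and pass it through to the validate function, which asserts has_value() before converting it.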
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 0fc5da7..018adec 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -399,11 +399,18 @@
                             {
                                 FullyConnectedLayer* baseLayer = PolymorphicDowncast<FullyConnectedLayer*>(&base);
 
+                                Optional<TensorInfo> biases;
+
+                                if (baseLayer->GetParameters().m_BiasEnabled)
+                                {
+                                    biases = baseLayer->m_Bias->GetTensorInfo();
+                                }
+
                                 arm_compute::Status status = ClFullyConnectedWorkloadValidate(
                                         baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                         activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                         baseLayer->m_Weight->GetTensorInfo(),
-                                        baseLayer->m_Bias->GetTensorInfo(),
+                                        biases,
                                         baseLayer->GetParameters(),
                                         &activationDesc);
 
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index 3eb53e6..017f4ff 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -19,7 +19,7 @@
 arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const TensorInfo& weights,
-                                                     const TensorInfo& biases,
+                                                     const Optional<TensorInfo>& biases,
                                                      const FullyConnectedDescriptor& descriptor,
                                                      const ActivationDescriptor* activationDescriptor)
 {
@@ -31,7 +31,8 @@
     arm_compute::TensorInfo* optionalAclBiases = nullptr;
     if (descriptor.m_BiasEnabled)
     {
-        aclBiases = BuildArmComputeTensorInfo(biases);
+        ARMNN_ASSERT(biases.has_value());
+        aclBiases = BuildArmComputeTensorInfo(biases.value());
         optionalAclBiases = &aclBiases;
     }
 
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.hpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.hpp
index 2107577..3ab9f98 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.hpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.hpp
@@ -18,7 +18,7 @@
 arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const TensorInfo& weights,
-                                                     const TensorInfo& biases,
+                                                     const Optional<TensorInfo>& biases,
                                                      const FullyConnectedDescriptor& descriptor,
                                                      const ActivationDescriptor* activationDescriptor = nullptr);
 
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 66547ad..7089f23 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -250,12 +250,18 @@
                             else if (base.GetType() == LayerType::FullyConnected)
                             {
                                 FullyConnectedLayer* baseLayer = PolymorphicDowncast<FullyConnectedLayer*>(&base);
+                                Optional<TensorInfo> biases;
+
+                                if (baseLayer->GetParameters().m_BiasEnabled)
+                                {
+                                    biases = baseLayer->m_Bias->GetTensorInfo();
+                                }
 
                                 arm_compute::Status status = NeonFullyConnectedWorkloadValidate(
                                         baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                         activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                         baseLayer->m_Weight->GetTensorInfo(),
-                                        baseLayer->m_Bias->GetTensorInfo(),
+                                        biases,
                                         baseLayer->GetParameters(),
                                         &activationDesc);
 
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 39a5696..26c68b7 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -24,7 +24,7 @@
 arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
                                                        const TensorInfo& output,
                                                        const TensorInfo& weights,
-                                                       const TensorInfo& biases,
+                                                       const Optional<TensorInfo>& biases,
                                                        const FullyConnectedDescriptor& descriptor,
                                                        const ActivationDescriptor* activationDescriptor)
 {
@@ -36,7 +36,8 @@
     arm_compute::TensorInfo* optionalAclBiases = nullptr;
     if (descriptor.m_BiasEnabled)
     {
-        aclBiases = BuildArmComputeTensorInfo(biases);
+        ARMNN_ASSERT(biases.has_value());
+        aclBiases = BuildArmComputeTensorInfo(biases.value());
         optionalAclBiases = &aclBiases;
     }
 
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp
index b5f6160..419a329 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.hpp
@@ -20,7 +20,7 @@
 arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
                                                        const TensorInfo& output,
                                                        const TensorInfo& weights,
-                                                       const TensorInfo& biases,
+                                                       const Optional<TensorInfo>& biases,
                                                        const FullyConnectedDescriptor& descriptor,
                                                        const ActivationDescriptor* activationDescriptor = nullptr);