MLCE-825: Give reason when a workload is unsupported for non-constant weights/bias

 * When weights or biases are non-constant, the IsXXXLayerSupported checks in
   BackendHelper.cpp never get as far as the Neon/Cl Validate functions, where
   an arm_compute::Status carrying a reason is returned. Populate
   reasonIfUnsupported in BackendHelper.cpp instead (see the sketch below) and
   drop the now-redundant non-constant weight checks from the Neon/Cl workloads.
 * Affected layers: Conv2d, DepthwiseConvolution2d, DilatedDepthwiseConvolution2d
   and FullyConnected.
 * Tidy up if() -> if ()
 * Clean up logic in FullyConnected so that IsLayerSupported gets called.
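
A minimal caller sketch (illustrative only, not part of this patch) of where
the new reason string surfaces through LayerSupportHandle. The
GetILayerSupportByBackendId and IsConvolution2dSupported APIs are existing
Arm NN entry points; the CpuAcc backend id, the tensor shapes/data types and
a build with the Neon backend linked in are assumptions made for the example.

    #include <armnn/BackendHelper.hpp>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    #include <iostream>
    #include <string>

    int main()
    {
        using namespace armnn;

        // Hypothetical NCHW tensor infos. Weights and biases are deliberately
        // left non-constant (IsConstant() == false) to trigger the new reason.
        TensorInfo input({1, 3, 8, 8}, DataType::Float32);
        TensorInfo output({1, 4, 6, 6}, DataType::Float32);
        TensorInfo weights({4, 3, 3, 3}, DataType::Float32);
        TensorInfo biases({4}, DataType::Float32);

        Convolution2dDescriptor descriptor;
        descriptor.m_StrideX = 1;
        descriptor.m_StrideY = 1;
        descriptor.m_BiasEnabled = true;

        std::string reason;
        LayerSupportHandle handle = GetILayerSupportByBackendId(BackendId("CpuAcc"));
        bool supported = handle.IsConvolution2dSupported(input, output, descriptor,
                                                         weights,
                                                         Optional<TensorInfo>(biases),
                                                         Optional<std::string&>(reason));
        if (!supported)
        {
            // With this change the reason is populated, e.g.
            // "Backend is not capable of supporting dynamic weights (NonConstWeights) ..."
            std::cout << reason << std::endl;
        }
        return 0;
    }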

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>

Change-Id: I5da1a882f4a2f55e90aa984b2b9548a847cb3a2d
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 2d70d7a..a5278eb 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -76,19 +76,19 @@
             {
                 return capability.GetValue().AsBool() == backendCapability.GetValue().AsBool();
             }
-            else if(capability.GetValue().IsFloat() && backendCapability.GetValue().IsFloat())
+            else if (capability.GetValue().IsFloat() && backendCapability.GetValue().IsFloat())
             {
                 return capability.GetValue().AsFloat() == backendCapability.GetValue().AsFloat();
             }
-            else if(capability.GetValue().IsInt() && backendCapability.GetValue().IsInt())
+            else if (capability.GetValue().IsInt() && backendCapability.GetValue().IsInt())
             {
                 return capability.GetValue().AsInt() == backendCapability.GetValue().AsInt();
             }
-            else if(capability.GetValue().IsString() && backendCapability.GetValue().IsString())
+            else if (capability.GetValue().IsString() && backendCapability.GetValue().IsString())
             {
                 return capability.GetValue().AsString() == backendCapability.GetValue().AsString();
             }
-            else if(capability.GetValue().IsUnsignedInt() && backendCapability.GetValue().IsUnsignedInt())
+            else if (capability.GetValue().IsUnsignedInt() && backendCapability.GetValue().IsUnsignedInt())
             {
                 return capability.GetValue().AsUnsignedInt() == backendCapability.GetValue().AsUnsignedInt();
             }
@@ -374,21 +374,32 @@
     TensorInfos infos{input, output, weights, biasesVal};
 
     Optional<const BackendOptions::BackendOption> capability ;
-    if(!m_BackendId.IsUndefined())
+    if (!m_BackendId.IsUndefined())
     {
-        capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
-        if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
+        capability = GetCapability("NonConstWeights", m_BackendId);
+        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
         {
-            if(!weights.IsConstant())
+            if (!weights.IsConstant())
             {
+                if (reasonIfUnsupported.has_value())
+                {
+                    reasonIfUnsupported.value() =
+                        "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+                        "Convolution2d weights are set as dynamic (non constant). ";
+                }
                 return false;
             }
-            if (descriptor.m_BiasEnabled && !biases.has_value())
+            if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
             {
+                if (reasonIfUnsupported.has_value())
+                {
+                    reasonIfUnsupported.value() =
+                            "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
+                            "Convolution2d biases are set as dynamic (non constant). ";
+                }
                 return false;
             }
 
-
             // At the first stage we will only print a warning. this is to give
             // backend developers a chance to adopt and read weights from input slots.
             ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
@@ -465,21 +476,30 @@
     TensorInfos infos{input, output, weights, biasesVal};
 
     Optional<const BackendOptions::BackendOption> capability ;
-    if(!m_BackendId.IsUndefined())
+    if (!m_BackendId.IsUndefined())
     {
-        capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
-        if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
+        capability = GetCapability("NonConstWeights", m_BackendId);
+        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
         {
-            if(!weights.IsConstant())
+            if (!weights.IsConstant())
             {
+                if (reasonIfUnsupported.has_value())
+                {
+                    reasonIfUnsupported.value() =
+                            "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+                            "DepthwiseConvolution2d weights are set as dynamic (non constant). ";
+                }
                 return false;
             }
-            if(descriptor.m_BiasEnabled)
+            if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
             {
-                if(!biases.value().IsConstant())
+                if (reasonIfUnsupported.has_value())
                 {
-                    return false;
+                    reasonIfUnsupported.value() =
+                            "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
+                            "DepthwiseConvolution2d biases are set as dynamic (non constant). ";
                 }
+                return false;
             }
             // At the first stage we will only print a warning. this is to give
             // backend developers a chance to adopt and read weights from input slots.
@@ -544,21 +564,30 @@
     TensorInfos infos{input, output, weights, biasesVal};
 
     Optional<const BackendOptions::BackendOption> capability ;
-    if(!m_BackendId.IsUndefined())
+    if (!m_BackendId.IsUndefined())
     {
-        capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
-        if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
+        capability = GetCapability("NonConstWeights", m_BackendId);
+        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
         {
-            if(!weights.IsConstant())
+            if (!weights.IsConstant())
             {
+                if (reasonIfUnsupported.has_value())
+                {
+                    reasonIfUnsupported.value() =
+                            "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+                            "DilatedDepthwiseConvolution2d weights are set as dynamic (non constant). ";
+                }
                 return false;
             }
-            if(descriptor.m_BiasEnabled)
+            if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
             {
-                if(!biases.value().IsConstant())
+                if (reasonIfUnsupported.has_value())
                 {
-                    return false;
+                    reasonIfUnsupported.value() =
+                            "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
+                            "DilatedDepthwiseConvolution2d biases are set as dynamic (non constant). ";
                 }
+                return false;
             }
             // At the first stage we will only print a warning. this is to give
             // backend developers a chance to adopt and read weights from input slots.
@@ -657,34 +686,44 @@
                                                    const FullyConnectedDescriptor& descriptor,
                                                    Optional<std::string&> reasonIfUnsupported)
 {
-    if(!m_BackendId.IsUndefined())
+    TensorInfos infos{input, output, weights, biases};
+
+    Optional<const BackendOptions::BackendOption> capability;
+    if (!m_BackendId.IsUndefined())
     {
-        auto capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
-        if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
+        capability = GetCapability("NonConstWeights", m_BackendId);
+        if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
         {
-            if(!weights.IsConstant())
+            if (!descriptor.m_ConstantWeights)
             {
                 if (reasonIfUnsupported.has_value())
                 {
                     reasonIfUnsupported.value() =
-                        "This backend might not support non constant weights. "
-                        "If weights are constant make sure to set IsConstant when creating TensorInfo";
+                            "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+                            "FullyConnected descriptor indicates that weights are dynamic (non constant). ";
+                }
+                return false;
+            }
+            if (!weights.IsConstant())
+            {
+                if (reasonIfUnsupported.has_value())
+                {
+                    reasonIfUnsupported.value() =
+                            "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+                            "FullyConnected weights are set as dynamic (non constant). ";
                 }
 
                 return false;
             }
-            if(descriptor.m_BiasEnabled)
+            if (descriptor.m_BiasEnabled && !biases.IsConstant())
             {
-                if(!biases.IsConstant())
+                if (reasonIfUnsupported.has_value())
                 {
-                    if (reasonIfUnsupported.has_value())
-                    {
-                        reasonIfUnsupported.value() =
-                            "This backend might not support non constant bias. "
-                            "If bias are constant make sure to set IsConstant when creating TensorInfo";
-                    }
-                    return false;
+                    reasonIfUnsupported.value() =
+                            "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
+                            "FullyConnected biases are set as dynamic (non constant). ";
                 }
+                return false;
             }
 
             // At the first stage we will only print a warning. this is to give
@@ -694,20 +733,8 @@
                                   "doxygen documentation on github https://github.com/ARM-software/armnn "
                                   "under the keyword 'ConstTensorsAsInputs'.";
         }
-
-        if(!descriptor.m_ConstantWeights)
-        {
-            capability = GetCapability("NonConstWeights", m_BackendId);
-            if (capability.has_value() && capability.value().GetValue().AsBool() == true)
-            {
-                return true;
-            }
-            return false;
-        }
     }
 
-    TensorInfos infos{input, output, weights, biases};
-
     return m_LayerSupport->IsLayerSupported(LayerType::FullyConnected,
                                             infos,
                                             descriptor,
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 762645b..6b0a3b8 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -28,15 +28,6 @@
                                                     bool isFastMathEnabled,
                                                     const ActivationDescriptor* activationDescriptor)
 {
-    // The arm_compute::CLConvolutionLayer supports both const and non const
-    // weights. However, in the case of non const weights we'd have to call
-    // prepare or configure for each inference which we're not setup to do just yet.
-    if (!weights.IsConstant())
-    {
-        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
-                                   "ArmNN ClConvolution2dWorkload does not support non constant weights."};
-    }
-
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
     arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 3a972d3..42fe400 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -30,15 +30,6 @@
                                                            const Optional<TensorInfo>& biases,
                                                            const ActivationDescriptor* activationDescriptor)
 {
-    // The CL implemented workload does support both const and non const
-    // weights. However, in the case of non const weights we'd have to call
-    // prepare or configure for each inference which we're not setup to do just yet.
-    if (!weights.IsConstant())
-    {
-        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
-                                   "ArmNN ClDepthwiseConv2dWorkload does not support non constant weights."};
-    }
-
     const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input,  descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index c2da5f2..0e1efe0 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -23,14 +23,6 @@
                                                      const FullyConnectedDescriptor& descriptor,
                                                      const ActivationDescriptor* activationDescriptor)
 {
-    // The CL implemented workload does support both const and non const
-    // weights. However, in the case of non const weights we'd have to call
-    // prepare or configure for each inference which we're not setup to do just yet.
-    if (!weights.IsConstant())
-    {
-        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
-                                    "Arm NN ClFullyConnectedWorkload does not support non constant weights."};
-    }
     const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
     arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 12d8c46..586b9c9 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -29,15 +29,6 @@
                                                       bool isFastMathEnabled,
                                                       const ActivationDescriptor* activationDescriptor)
 {
-    // arm_compute::NEConvolutionLayer supports both const and non const
-    // weights. However, in the case of non const weights we'd have to call
-    // prepare or configure for each inference which we're not setup to do just yet.
-    if (!weights.IsConstant())
-    {
-        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
-                                   "ArmNN NeonConvolution2dWorkload does not support non constant weights."};
-    }
-
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
     arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index 9eeac6e..e2d0a82 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -33,15 +33,6 @@
                                                              const Optional<TensorInfo>& biases,
                                                              const ActivationDescriptor* activationDescriptor)
 {
-    // The Neon implemented workload does support both const and non const
-    // weights. However, in the case of non const weights we'd have to call
-    // prepare or configure for each inference which we're not setup to do just yet.
-    if (!weights.IsConstant())
-    {
-        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
-                                   "ArmNN NeonDepthwiseConv2dWorkload does not support non constant weights."};
-    }
-
     const arm_compute::TensorInfo aclInputInfo   = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo  = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index d371680..0b91eb3 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -28,14 +28,6 @@
                                                        const FullyConnectedDescriptor& descriptor,
                                                        const ActivationDescriptor* activationDescriptor)
 {
-    // The NEON implemented workload does support both const and non const
-    // weights. However, in the case of non const weights we'd have to call
-    // prepare or configure for each inference which we're not setup to do just yet.
-    if (!weights.IsConstant())
-    {
-        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
-                                    "Arm NN NeonFullyConnectedWorkload does not support non constant weights."};
-    }
     const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
     arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);