[MLBEDSW-3300] Fix DepthwiseConv2D failure when bias tensor quant_values are None

Fixed a DepthwiseConv2D failure when the bias tensor's quant_values are None.
Also fixed DepthwiseConv2D failing when the depth multiplier is implicit.

Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Change-Id: I799a565eefa498ccf7ac626fcd472b8cbd908931
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index 4429238..91fcb5a 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -490,7 +490,7 @@
     def constraint_bias_40bit(op):
         "Optional Bias tensor values must fit within 40-bits"
         bias = op.bias
-        if bias and bias.dtype == DataType.int64:
+        if bias and bias.dtype == DataType.int64 and bias.quant_values is not None:
             valid = all(len(bin(quant_value)[2:]) <= 40 for quant_value in bias.quant_values)
             return valid, f"Tensor '{bias.name}' has values larger than 40-bits"
         return True, "Op has no bias tensor, or it fits in 40-bit"
diff --git a/ethosu/vela/tflite_reader.py b/ethosu/vela/tflite_reader.py
index c190f7e..9e20215 100644
--- a/ethosu/vela/tflite_reader.py
+++ b/ethosu/vela/tflite_reader.py
@@ -188,6 +188,12 @@
             if "depth_multiplier" in op.attrs:
                 op.attrs["channel_multiplier"] = op.attrs["depth_multiplier"]
 
+            if op_type == Op.DepthwiseConv2DBias and op.attrs["depth_multiplier"] == 0:
+                # The depth multiplier is implicit and is calculated as weight channels / ifm channels
+                # Note however that the weights have been reshaped above.
+                # The original value is cached above in channel_multiplier
+                op.attrs["depth_multiplier"] = op.weights.shape[2] // op.ifm.shape[-1]
+
             faf = op.attrs.pop("fused_activation_function", None)
             if faf is not None:
                 op.activation = create_activation_function(faf)