MLBEDSW-3858: Incorrect PAD usage bug

Fixed a bug where PAD having no consumers would result in a crash.
Now the constraint check no longer crashes, so the intended error message is shown, making debugging easier.

Change-Id: I1e4403d47a6152e7adbf7bc065db86d4217d39cc
Signed-off-by: erik.andersson@arm.com <erik.andersson@arm.com>
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index 9d31518..1bebe9a 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -829,17 +829,15 @@
     def constraint_pad_ofm(cls, op):
         "Must be followed by one of the following operator types: {}"
         consumers = op.ofm.consumers()
-        consumers_to_pad = 0
-        for consumer in consumers:
-            if consumer.type in cls.supported_pad_consumers:
-                if consumer.attrs["padding"] == Padding.VALID:
-                    consumers_to_pad += 1
-        valid = len(consumers) > 0 and len(consumers) == consumers_to_pad
-        return (
-            valid,
-            f"Operator is followed by {consumers_to_pad} consumers with "
-            f"padding set to VALID, out of {len(consumers)} consumers",
-        )
+        unsupported_consumers = [
+            cons.type
+            for cons in consumers
+            if cons is not None
+            if cons.type not in cls.supported_pad_consumers or cons.attrs["padding"] != Padding.VALID
+        ] + [None for cons in consumers if cons is None]
+        none_string = ", ".join(["NoneType" for cons in consumers if cons is None])
+        valid = len(unsupported_consumers) == 0
+        return valid, f"PAD operator is followed by: {_optype_formatter(unsupported_consumers)+none_string}"
 
     @staticmethod
     def constraint_stridedslice_inputs_const(op):
diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index d8fbb98..36213b7 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -518,7 +518,13 @@
 
 
 def create_pad_op(
-    in_shape, out_shape, padding, in_dtype=DataType.int8, out_dtype=DataType.int8, pad_dtype=DataType.int32
+    in_shape,
+    out_shape,
+    padding,
+    in_dtype=DataType.int8,
+    out_dtype=DataType.int8,
+    pad_dtype=DataType.int32,
+    pad_setting=Padding.VALID,
 ):
     qp = testutil.default_quant_params()
     in0 = Tensor(in_shape, in_dtype, "in")
@@ -527,19 +533,17 @@
     out = Tensor(out_shape, out_dtype, "out")
     out.quantization = qp.clone()
     op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
-
     conv_out_tens = Tensor(in_shape, in_dtype, "output")
     conv_out_tens.quantization = qp.clone()
     weight_tens = Tensor(in_shape, in_dtype, "weights")
     weight_tens.values = np.zeros(weight_tens.shape)
     weight_tens.quant_values = np.zeros(weight_tens.shape, np.int8)
     weight_tens.quantization = qp.clone()
-    bias_tens = Tensor([in_shape[-1]], pad_dtype, "biases")
-    attrs = {"padding": Padding.VALID, "stride_w": 2, "stride_h": 2, "dilation_w_factor": 1, "dilation_h_factor": 1}
+    bias_tens = Tensor(out_shape, pad_dtype, "biases")
+    attrs = {"padding": pad_setting, "stride_w": 2, "stride_h": 2, "dilation_w_factor": 1, "dilation_h_factor": 1}
     attrs["strides"] = (1, attrs["stride_h"], attrs["stride_w"], 1)
-    conv2d_op = testutil.create_op(Op.Conv2D, [out, weight_tens, bias_tens], conv_out_tens, attrs)
+    conv2d_op = testutil.create_op(Op.Conv2DBias, [out, weight_tens, bias_tens], conv_out_tens, attrs)
     conv2d_op.add_input_tensor(out)
-    conv2d_op.set_ifm_ofm_shapes()
     return op
 
 
@@ -581,11 +585,16 @@
 
 def test_constraint_pad_consumer():
     # PAD operator must be followed by a valid consumer with Padding.VALID attribute
-    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],)
-    conv_op = op.ofm.consumers()[0]
-    conv_op.attrs["Padding"] = Padding.SAME
+    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0]],)
+    assert support.is_operator_supported(op)
+    op = create_pad_op(
+        in_shape=[1, 1, 1, 1],
+        out_shape=[1, 3, 3, 1],
+        padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
+        pad_setting=Padding.SAME,
+    )
     assert not support.is_operator_supported(op)
-    op_consumer = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
+    op_consumer = testutil.create_op_with_quant_tensors(Op.ConcatTFLite, [1, 1, 1, 4], [1, 1, 1, 8])
     op.ofm.consumer_list = [op_consumer]
     assert not support.is_operator_supported(op)
     op_consumer = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])