MLBEDSW-3302: Reject per-channel scaling for unsupported ops

Vela only supports per-channel scaling for
convolution ops. This commit adds a check that
puts ops with per-channel scaling on the CPU.
A caveat worth mentioning is that neither
TensorFlow Lite nor TensorFlow Lite Micro support
per-channel scaling for the CPU-placed op,
however, the problem is moved away from Vela.
This commit also changes a small utility function
in supported_operators.py used for docstring
formatting.

Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I9ed090592f1d05dd4566d3e54dba1ef405299383
diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index 62de0d1..86d2475 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -100,6 +100,28 @@
     assert not support.is_operator_supported(op)
 
 
+def test_constraint_tens_quant_per_axis_not_supp():
+    # Quantization scale cannot be array-valued for elemwise ops
+    qp = QuantizationParameters()
+    qp.zero_point = np.zeros((1, 3))
+    qp.scale_f32 = np.ones((1, 3))
+    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_tens_quant_per_axis_is_supp():
+    op = testutil.create_op_with_quant_tensors(
+        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
+    )
+    op.attrs = {"stride_w": 1, "stride_h": 1}
+    assert support.is_operator_supported(op)
+    qp = QuantizationParameters()
+    qp.zero_point = np.zeros((1, 3))
+    qp.scale_f32 = np.ones((1, 3))
+    op.bias.quantization = qp
+    assert support.is_operator_supported(op)
+
+
 def test_constraint_faf():
     # Fused activation functions, if set, must be a valid op type
     op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py
index b06008a..8258827 100644
--- a/ethosu/vela/test/testutil.py
+++ b/ethosu/vela/test/testutil.py
@@ -80,7 +80,9 @@
     return op
 
 
-def create_op_with_quant_tensors(op_type, ifm_shape, ofm_shape, weights_shape=None, datatype=DataType.uint8):
+def create_op_with_quant_tensors(
+    op_type, ifm_shape, ofm_shape, weights_shape=None, bias_shape=None, datatype=DataType.uint8
+):
     ifm = Tensor(ifm_shape, datatype, "in")
     ifm.quantization = default_quant_params()
     ofm = Tensor(ofm_shape, datatype, "out")
@@ -102,6 +104,12 @@
             "weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp
         )
         op.add_input_tensor(weights)
+    # Optional bias tensor
+    if bias_shape is not None:
+        qp = default_quant_params()
+        qp.zero_point = np.zeros(bias_shape)
+        bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp)
+        op.add_input_tensor(bias)
     return op