MLBEDSW-3572: Fused activations must not be int32

Added a supported-operators check: operators with a 32-bit fused
activation function are reported as not supported.

Change-Id: I01fdafeff8fdb13c71eae4f63be7e6f81b9223df
Signed-off-by: Louis Verhaard <louis.verhaard@arm.com>
diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index 5f64dd9..3e9724d 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -147,6 +147,16 @@
     assert not support.is_operator_supported(op)
 
 
+def test_constraint_faf_ofm_dtype():
+    # If fused activation function is present, OFM must be 8 or 16 bit
+    shp = [1, 8, 8, 8]
+    for dtype in [DataType.int8, DataType.uint8, DataType.int16, DataType.int32]:
+        op = testutil.create_elemwise_op(Op.Add, "op", shp, shp, shp, datatype=dtype)
+        op.activation = ActivationFunction(Op.Relu)  # attach a fused activation so the constraint is exercised
+        expected = dtype.size_in_bytes() <= 2  # 8/16-bit dtypes pass; 32-bit (4 bytes) must be rejected
+        assert support.is_operator_supported(op) == expected, f"Data type: {dtype}"
+
 def test_constraint_conv_pass():
     # First test a simple conv passes
     op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])