optim: Fix issue with IFM streaming of LUT

Check input/output scaling equality through the tensor-level
is_scaling_equal() helper instead of calling the method on the
QuantizationParameters objects directly.

Signed-off-by: Michael McGeagh <michael.mcgeagh@arm.com>
Change-Id: I3c3ed73a6db39615ddf5987dc5696b6b09682be0
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index 86cc3c0..b6551cf 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -398,8 +398,8 @@
 
         if (
             op.inputs[0].quantization is None
-            or not op.inputs[0].quantization.is_scaling_equal(op.inputs[1].quantization)
-            or not op.inputs[0].quantization.is_scaling_equal(op.outputs[0].quantization)
+            or not op.inputs[0].is_scaling_equal(op.inputs[1])
+            or not op.inputs[0].is_scaling_equal(op.outputs[0])
         ):
             print(
                 "Warning: Input/output tensors with different quantization is unsupported for the", op.type, "operator"