MLBEDSW-5361 - Fix per-axis quantization support

This commit fixes a number of bugs where per-axis
quantization either caused Vela to crash or was
not properly recognized.

Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I50a461d200274b43ec76f3a7357bf66db6d49964
diff --git a/ethosu/vela/tflite_model_semantic.py b/ethosu/vela/tflite_model_semantic.py
index 6e2467b..51d1f07 100644
--- a/ethosu/vela/tflite_model_semantic.py
+++ b/ethosu/vela/tflite_model_semantic.py
@@ -282,7 +282,7 @@
         "Input and Output tensors must have quantization scales that fit within float32 precision"
         if op.ofm is not None and op.ofm.is_quantized():
             ofm_scale = op.ofm.quantization.scale_f32
-            if ofm_scale < np.finfo(np.float32).tiny:
+            if np.any(ofm_scale < np.finfo(np.float32).tiny):
                 return (
                     False,
                     f"The quantization scale of the output tensor is {ofm_scale}, "
@@ -290,7 +290,7 @@
                 )
             if op.ifm is not None and op.ifm.is_quantized():
                 ifm_scale = op.ifm.quantization.scale_f32
-                if np.isinf(ifm_scale / ofm_scale):
+                if np.any(np.isinf(ifm_scale / ofm_scale)):
                     return (
                         False,
                         f"IFM scale divided by OFM scale is infinite, ifm_scale={ifm_scale} ofm_scale={ofm_scale}",