MLBEDSW-2569: Support 1x1 IFM ResizeBilinear

Convert a ResizeBilinear op with a 1x1 IFM into an elementwise Add of the
IFM and a zero-filled tensor of the output shape, and return unsupported
upscaling factors unchanged instead of raising UnsupportedFeatureError.

Signed-off-by: Charles Xu <charles.xu@arm.com>
Change-Id: I44428d77b2e8e44a477e5c4dfe28ab8dd1792838
diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py
index c805be5..355b16f 100644
--- a/ethosu/vela/graph_optimiser.py
+++ b/ethosu/vela/graph_optimiser.py
@@ -27,6 +27,7 @@
 from .numeric_util import full_shape
 from .operation import NpuBlockType
 from .operation import Operation
+from .tensor import QuantizationParameters
 from .tensor import Tensor
 
 passthrough_nodes = set(("Identity",))
@@ -181,6 +182,39 @@
     return op
 
 
+# Convert a ResizeBilinear op with a 1x1 IFM to an elementwise Add
+def convert_resizebilinear_1x1_to_add(op):
+    op.type = "AddAct"
+    op.name = op.name + "_add"
+    op.attrs.update({"npu_block_type": NpuBlockType.ElementWise})
+    op.attrs["resizebilinear"] = True
+    # Create a zero-filled input tensor of the output shape
+    shape = op.outputs[0].shape
+    tens = Tensor(shape, op.inputs[0].dtype, op.inputs[1].name + "_add")
+    tens.values = np.zeros(shape)
+    tens.quant_values = np.zeros(shape, np.uint8)
+    tens.quantization = QuantizationParameters(0.0, 255.0)
+    tens.quantization.scale_f32 = 1.0
+    tens.quantization.zero_point = 0
+    tens.consumer_list = [op]
+    tens_op = op.inputs[1].ops[0]  # repurpose the producer of the original size input to produce the zero tensor
+    tens_op.outputs = [tens]
+    tens.ops = [tens_op]
+    # Set the Add inputs: the zero tensor first, then the original IFM
+    op.inputs[1] = op.inputs[0]
+    op.inputs[0] = tens
+
+    return op
+
+
+def fixup_resizebilinear(op, arch):
+    if op.type == "ResizeBilinear":
+        if op.inputs[0].shape[1] == 1 and op.inputs[0].shape[2] == 1:
+            convert_resizebilinear_1x1_to_add(op)
+
+    return op
+
+
 def fixup_fully_connected_input(op, arch):
     if op.type == "FullyConnectedAct":
         inp = op.inputs[0]
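For reference on why the conversion above is exact: a 1x1 IFM holds a single
value per channel, so bilinear interpolation degenerates to broadcasting that
value over the output. The short NumPy sketch below is illustrative only and
does not use Vela's internals; the shapes and values are made up.

import numpy as np

# A 1x1 IFM (NHWC) holds a single value per channel; bilinear resizing can
# only replicate that value, i.e. broadcast it over the output shape.
ifm = np.array([[[[7.0, -3.0]]]])          # shape (1, 1, 1, 2)
out_shape = (1, 4, 4, 2)
resized = np.broadcast_to(ifm, out_shape)  # what ResizeBilinear yields here

# The rewrite adds the IFM to a zero tensor of the output shape: broadcasting
# performs the upscaling and the zeros change nothing.
zeros = np.zeros(out_shape)
assert np.array_equal(zeros + ifm, resized)

# The zero tensor's quantisation (scale 1.0, zero point 0) dequantises its
# uint8 zeros to exactly 0.0, so the Add is numerically a no-op.
quant_values = np.zeros(out_shape, np.uint8)
assert np.all((quant_values.astype(np.float32) - 0) * 1.0 == 0.0)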
@@ -614,8 +648,7 @@
             # produce a (M * 2 - 1, N * 2 - 1) sized output
             op.attrs["padding"] = b"VALID"
         else:
-            # If this exception is raised, something is wrong with the supported op check
-            raise UnsupportedFeatureError("Unsupported upscaling factor")
+            return op  # unsupported upscaling factor; leave the op unchanged instead of raising
         input_tensor.resampling_mode = resampling_mode.NEAREST
         op.attrs.update({"strides": (1, 1, 1, 1), "ksize": (1, 2, 2, 1)})
     return op
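The change above replaces the exception with a pass-through, so upscaling
factors outside the 2x2 average-pool path are simply left for other rewrites.
A rough, hypothetical sketch of that decision follows; the helper name and
the SAME-padding branch are assumptions, only the (M*2-1, N*2-1) -> VALID
case appears in the hunk above.

def choose_resize_padding(ifm_shape, ofm_shape):
    # Hypothetical helper, not Vela code: 2x upscale -> SAME padding,
    # (2x - 1) upscale (align_corners style) -> VALID, anything else is
    # now left untouched instead of raising UnsupportedFeatureError.
    _, h, w, _ = ifm_shape
    _, oh, ow, _ = ofm_shape
    if (oh, ow) == (2 * h, 2 * w):
        return b"SAME"
    if (oh, ow) == (2 * h - 1, 2 * w - 1):
        return b"VALID"
    return None  # e.g. a 1x1 IFM upscaled to 4x4 falls through here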
@@ -647,6 +680,7 @@
         mark_npu_block_type,
         fixup_elementwise_with_scalars,
         reorder_depthwise_weights,
+        fixup_resizebilinear,
         # convert_mul_max_to_abs_or_lrelu # TODO: enable optimisation once quantisation issues are resolved
     ]
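Finally, the functions in op_rewrite_list are applied to each operation by the
graph rewriter. The snippet below is a much-simplified, self-contained sketch
of that pattern, not the actual driver in rewrite_graph.py; the function and
variable names are invented for illustration.

def apply_op_rewrites(ops, arch, rewrite_list):
    # Simplified sketch: run every rewrite on every op, in list order.
    # Each rewrite returns the op, possibly converted, exactly as
    # fixup_resizebilinear returns either the original op or the new AddAct.
    rewritten = []
    for op in ops:
        for fixup in rewrite_list:
            op = fixup(op, arch)
        rewritten.append(op)
    return rewritten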