MLBEDSW-2372: Failing assert for ResizeBilinear with upscale != 2x

This commit fixes the failing assert by removing it
and instead placing unsupported ResizeBilinear
operators on the CPU.

It introduces a new graph optimisation function
which adds the necessary attributes, as well as
a new operator restriction for ResizeBilinear
that only allows 2x upscaling on the NPU.

Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I2feffd0b5a2169ebffbe4f165e450b3f2d140380
diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py
index 72bb486..758b51a 100644
--- a/ethosu/vela/graph_optimiser.py
+++ b/ethosu/vela/graph_optimiser.py
@@ -23,6 +23,7 @@
 from . import rewrite_graph
 from .data_type import DataType
 from .errors import UnsupportedFeatureError
+from .ethos_u55_regs.ethos_u55_regs import resampling_mode
 from .operation import NpuBlockType
 from .operation import Operation
 from .tensor import Tensor
@@ -483,6 +484,30 @@
     return op
 
 
+def add_attrs_to_resizebilinear(op, arch):
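+    # ResizeBilinear is run on the NPU as an average pool: the hardware
+    # upscales the IFM 2x (nearest-neighbour) and a 2x2 stride-1 pool is applied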
+    if op.type == "ResizeBilinear" and op.run_on_npu:
+        input_tensor = op.inputs[0]
+        upscaled_shape = [input_tensor.shape[1] * 2, input_tensor.shape[2] * 2]
+        out_shape = op.outputs[0].shape[1:3]
+        if not op.attrs["align_corners"] and out_shape == upscaled_shape:
+            # this means the output is supposed to be a 2x upscale,
+            # so we need to do SAME padding
+            op.attrs["padding"] = b"SAME"
+        elif op.attrs["align_corners"] and out_shape == [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
+            # here we can just run the avg pool without padding and
+            # produce a (M * 2 - 1, N * 2 - 1) sized output
+            op.attrs["padding"] = b"VALID"
+        else:
+            # If this exception is raised, something is wrong with the supported op check
+            raise UnsupportedFeatureError("Unsupported upscaling factor")
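+        # the hardware performs the 2x nearest-neighbour upscaling of the IFM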
+        input_tensor.resampling_mode = resampling_mode.NEAREST
+        op.attrs.update({"strides": (1, 1, 1, 1), "ksize": (1, 2, 2, 1)})
+    return op
+
+
 def supported_operator_check(op, arch):
     op.run_on_npu = arch.supported_operators.is_operator_supported(op)
     return op
@@ -503,6 +528,7 @@
         fixup_pack_input,
         fixup_conv2d_backprop,
         fixup_act_reorder,
+        add_attrs_to_resizebilinear,
         add_padding_fields,
         mark_npu_block_type,
         fixup_elementwise_with_scalars,
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index ce3fa60..729d435 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -29,6 +29,7 @@
         self.max_pooling_ops = set(("QuantizedMaxPool", "MaxPool", "MaxPoolAct"))
         self.avg_pooling_ops = set(("QuantizedAvgPool", "AvgPool", "AvgPoolAct"))
         self.pooling_ops = self.max_pooling_ops | self.avg_pooling_ops
+        self.resizing_ops = set(("ResizeBilinear",))
         self.fc_vector_products = set(("QuantizedMatMul", "MatMul", "FullyConnectedAct"))
         self.mac_main_ops = (
             # convolutions
@@ -37,12 +38,12 @@
             | self.depthwise_convolution_ops
             # pooling
             | self.pooling_ops
+            # resizing/upscaling
+            | self.resizing_ops
             # FC layers
             | self.fc_vector_products
             # RNN/LSTM/GRU
             | set(("BlockLSTM"))
-            # deconvolution
-            | set(("ResizeBilinear",))
         )
         self.unary_elem_wise_main_ops = set(("LeakyRelu", "Abs"))
         self.binary_elem_wise_min_max_ops = set(("Minimum", "Maximum"))
@@ -90,6 +91,7 @@
             {op: self.check_depthwise_convolution_restrictions for op in self.depthwise_convolution_ops}
         )
         self.supported_operator_restrictions.update({op: self.check_pooling_restrictions for op in self.pooling_ops})
+        self.supported_operator_restrictions.update({op: self.check_resize_restrictions for op in self.resizing_ops})
         self.supported_operator_restrictions.update(
             {op: self.check_vector_product_restrictions for op in self.fc_vector_products}
         )
@@ -206,6 +208,17 @@
                 return False
         return True
 
+    def check_resize_restrictions(self, op):
+        # check unsupported upscaling factor
+        if op.type == "ResizeBilinear":
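+            # only a 2x upscale is supported: the output must be (2 * H, 2 * W),
+            # or (2 * H - 1, 2 * W - 1) when align_corners is set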
+            upscaled_shape = [op.inputs[0].shape[1] * 2, op.inputs[0].shape[2] * 2]
+            out_shape = op.outputs[0].shape[1:3]
+            if not op.attrs["align_corners"] and out_shape != upscaled_shape:
+                return False
+            elif op.attrs["align_corners"] and out_shape != [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
+                return False
+        return True
+
     def check_vector_product_restrictions(self, op):
         # check data type
         ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()
diff --git a/ethosu/vela/tflite_reader.py b/ethosu/vela/tflite_reader.py
index 4ee3963..109ae0e 100644
--- a/ethosu/vela/tflite_reader.py
+++ b/ethosu/vela/tflite_reader.py
@@ -20,7 +20,6 @@
 import numpy as np
 
 from .errors import UnsupportedFeatureError
-from .ethos_u55_regs.ethos_u55_regs import resampling_mode
 from .nn_graph import Graph
 from .nn_graph import Subgraph
 from .operation import Operation
@@ -146,24 +145,6 @@
         if opt_serializer is not None:
             op.attrs = opt_serializer.deserialize(op_data.BuiltinOptions(), op_data.CustomOptionsAsNumpy())
 
-            if op_type.startswith("ResizeBilinear"):
-                input_tensor = op.inputs[0]
-                upscaled_shape = [input_tensor.shape[1] * 2, input_tensor.shape[2] * 2]
-                out_shape = op.outputs[0].shape[1:3]
-                if not op.attrs["align_corners"] and out_shape == upscaled_shape:
-                    # this means the output is supposed to be a x2 upscale,
-                    # so we need to do SAME padding
-                    op.attrs.update({"padding": b"SAME"})
-                elif op.attrs["align_corners"] and out_shape == [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
-                    # here we can just run the avg pool without padding and
-                    # produce a (M * 2 - 1, N * 2 - 1) sized output
-                    op.attrs.update({"padding": b"VALID"})
-                else:
-                    raise UnsupportedFeatureError("ResizeBilinear: Only 2x upscaling is supported")
-                op.attrs.update({"filter_width": 2, "filter_height": 2, "stride_w": 1, "stride_h": 1})
-
-                input_tensor.resampling_mode = resampling_mode.NEAREST
-
             if "stride_w" in op.attrs:
                 op.attrs["strides"] = (1, op.attrs["stride_h"], op.attrs["stride_w"], 1)
             if "filter_width" in op.attrs: