MLBEDSW-2372: Failing assert for ResizeBilinear with upscale != 2x

This commit fixes the failing assert by removing it
and instead placing unsupported ResizeBilinear
operators on the CPU.

It introduces a new graph optimisation function
which adds the necessary attributes, as well as
new operator restrictions for ResizeBilinear.

Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I2feffd0b5a2169ebffbe4f165e450b3f2d140380
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index ce3fa60..729d435 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -29,6 +29,7 @@
         self.max_pooling_ops = set(("QuantizedMaxPool", "MaxPool", "MaxPoolAct"))
         self.avg_pooling_ops = set(("QuantizedAvgPool", "AvgPool", "AvgPoolAct"))
         self.pooling_ops = self.max_pooling_ops | self.avg_pooling_ops
+        self.resizing_ops = set(("ResizeBilinear",))
         self.fc_vector_products = set(("QuantizedMatMul", "MatMul", "FullyConnectedAct"))
         self.mac_main_ops = (
             # convolutions
@@ -37,12 +38,12 @@
             | self.depthwise_convolution_ops
             # pooling
             | self.pooling_ops
+            # resizing/upscaling
+            | self.resizing_ops
             # FC layers
             | self.fc_vector_products
             # RNN/LSTM/GRU
             | set(("BlockLSTM"))
-            # deconvolution
-            | set(("ResizeBilinear",))
         )
         self.unary_elem_wise_main_ops = set(("LeakyRelu", "Abs"))
         self.binary_elem_wise_min_max_ops = set(("Minimum", "Maximum"))
@@ -90,6 +91,7 @@
             {op: self.check_depthwise_convolution_restrictions for op in self.depthwise_convolution_ops}
         )
         self.supported_operator_restrictions.update({op: self.check_pooling_restrictions for op in self.pooling_ops})
+        self.supported_operator_restrictions.update({op: self.check_resize_restrictions for op in self.resizing_ops})
         self.supported_operator_restrictions.update(
             {op: self.check_vector_product_restrictions for op in self.fc_vector_products}
         )
@@ -206,6 +208,17 @@
                 return False
         return True
 
+    def check_resize_restrictions(self, op):
+        # check unsupported upscaling factor
+        if op.type == "ResizeBilinear":
+            upscaled_shape = [op.inputs[0].shape[1] * 2, op.inputs[0].shape[2] * 2]
+            out_shape = op.outputs[0].shape[1:3]
+            if not op.attrs["align_corners"] and out_shape != upscaled_shape:
+                return False
+            elif op.attrs["align_corners"] and out_shape != [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
+                return False
+        return True
+
     def check_vector_product_restrictions(self, op):
         # check data type
         ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()