MLBEDSW-5315 MLCE: Vela to handle skip Tensor

Removed a graph optimisation that is no longer needed and that caused
problems when the output of FullyConnected operators running on the CPU
was consumed by elementwise operators in Vela.
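
For context, a minimal sketch (not Vela code; the Tensor dataclass and
pad_elementwise_inputs helper below are hypothetical stand-ins) of the
kind of in-place shape padding the removed fixup_elementwise_with_scalars
pass performed, and why rewriting a tensor whose producer stays on the
CPU can cause the problems described above:

    # Minimal sketch, assuming a simplified Tensor model; not the Vela
    # implementation.
    from dataclasses import dataclass, field
    from typing import List, Optional


    def full_shape(dim, shape, fill):
        # Pad `shape` on the left with `fill` until it has `dim` dimensions.
        return [fill] * (dim - len(shape)) + list(shape)


    @dataclass
    class Tensor:
        shape: List[int]
        values: Optional[list] = None
        storage_shape: List[int] = field(default_factory=list)


    def pad_elementwise_inputs(ifm: Tensor, ifm2: Tensor) -> None:
        # The removed pass rewrote the graph tensors in place so both
        # elementwise inputs had the same rank. If the IFM is produced by
        # an operator that Vela leaves on the CPU (such as an unsupported
        # FullyConnected), that rewrite touches a tensor the CPU kernel
        # still relies on, which is the kind of problem this commit avoids
        # by deleting the pass.
        diff = len(ifm.shape) - len(ifm2.shape)
        if diff > 0:
            ifm2.shape = full_shape(len(ifm.shape), ifm2.shape, 1)
        elif diff < 0:
            ifm.shape = full_shape(len(ifm2.shape), ifm.shape, 1)


    ifm = Tensor(shape=[1, 8, 8, 16])
    ifm2 = Tensor(shape=[16])
    pad_elementwise_inputs(ifm, ifm2)
    print(ifm2.shape)  # [1, 1, 1, 16]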

Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Change-Id: Ic7e66141ccd5e9aa8f0022c5ab9e7fd1ba3f6786
diff --git a/ethosu/vela/tflite_graph_optimiser.py b/ethosu/vela/tflite_graph_optimiser.py
index cf211de..f59edde 100644
--- a/ethosu/vela/tflite_graph_optimiser.py
+++ b/ethosu/vela/tflite_graph_optimiser.py
@@ -40,7 +40,6 @@
 from .graph_optimiser_util import set_ifm_ofm_op_shapes
 from .graph_optimiser_util import set_tensor_equivalence
 from .numeric_util import clamp_sigmoid
-from .numeric_util import full_shape
 from .numeric_util import round_away_zero
 from .operation import create_activation_function
 from .operation import ExplicitScaling
@@ -623,26 +622,6 @@
     return op
 
 
-def fixup_elementwise_with_scalars(op, arch, nng):
-    if op.type.is_binary_elementwise_op():
-        ifm_tensor, ifm2_tensor, _, _ = op.get_ifm_ifm2_weights_ofm()
-        if ifm2_tensor.shape != [] and ifm_tensor.shape != []:
-            diff = len(ifm_tensor.shape) - len(ifm2_tensor.shape)
-            if diff > 0:
-                ifm2_tensor.shape = full_shape(len(ifm_tensor.shape), ifm2_tensor.shape, 1)
-            elif diff < 0:
-                ifm_tensor.shape = full_shape(len(ifm2_tensor.shape), ifm_tensor.shape, 1)
-        elif ifm_tensor.shape == [] and ifm_tensor.values is None:
-            # IFM is marked as a scalar, but is a result of an operation; change it to a shape of size 1
-            ifm_tensor.shape = len(ifm2_tensor.shape) * [1]
-            ifm_tensor.storage_shape = ifm_tensor.shape
-        elif ifm2_tensor.shape == [] and ifm2_tensor.values is None:
-            # IFM2 is marked as a scalar, but is a result of an operation; change it to a shape of size 1
-            ifm2_tensor.shape = len(ifm_tensor.shape) * [1]
-            ifm2_tensor.storage_shape = ifm2_tensor.shape
-    return op
-
-
 def convert_softmax(op, arch, nng):
     if op.type == Op.Softmax and op.run_on_npu:
         softmax = SoftMax(op)
@@ -1423,7 +1402,6 @@
         convert_batched_fc_shape,
         fixup_conv2d_backprop,
         fixup_relus_with_differing_ifm_ofm_scaling,
-        fixup_elementwise_with_scalars,
         reorder_depthwise_weights,
         fixup_resizebilinear,
         fixup_bias_tensors,