Further update for MLBEDSW-1872

 - Pack and Reshape operators were manipulating tensors with
   different equivalence IDs that refer to the same memory
   area, causing the block dependency check to miss the
   dependency between them. Ideally, address overlap
   calculations would be used for accuracy, but this commit
   implements a generalised solution by setting the IO tensors
   of memory operations to the same equivalence ID.
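
   As a minimal sketch of the idea (the Tensor class and the
   may_overlap check below are simplified stand-ins, not Vela's
   actual code), a dependency check keyed on equivalence IDs only
   sees the aliasing once the IDs are unified:

       import uuid

       class Tensor:
           # Simplified stand-in: every tensor gets a fresh
           # equivalence ID unless one is explicitly shared
           def __init__(self, name):
               self.name = name
               self.equivalence_id = uuid.uuid4()

       def may_overlap(t1, t2):
           # Dependency check modelled on equivalence IDs: tensors
           # count as the same memory only if their IDs match
           return t1.equivalence_id == t2.equivalence_id

       # A Reshape's input and output alias the same memory area
       # but start out with distinct equivalence IDs, so the
       # dependency between them is missed...
       ifm = Tensor("reshape_ifm")
       ofm = Tensor("reshape_ofm")
       assert not may_overlap(ifm, ofm)

       # ...whereas after the pass sets both tensors to the same
       # ID, the check sees the overlap
       ifm.equivalence_id = ofm.equivalence_id
       assert may_overlap(ifm, ofm)

   Registering the pass early in op_rewrite_list (before
   supported_operator_check, as in the diff below) means that all
   later rewrites already see the unified IDs.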

Change-Id: Ia59ae3900f508ffeebaf7af4bca32f5be4e69345
Signed-off-by: Tim Hall <tim.hall@arm.com>
diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py
index b2b233e..913b9a6 100644
--- a/ethosu/vela/graph_optimiser.py
+++ b/ethosu/vela/graph_optimiser.py
@@ -400,6 +400,15 @@
     return op
 
 
+# Set input/output tensor equivalence to the same id for memory operations
+def set_tensor_equivalence(op, arch):
+    if op.type == "Reshape":
+        eid = op.outputs[0].equivalence_id
+        for inp in op.inputs:
+            inp.equivalence_id = eid
+    return op
+
+
 def convert_mul_max_to_abs_or_lrelu(op, arch):
     r"""Whenever there is a subgraph with this topology:
 
@@ -473,6 +482,7 @@
     op_rewrite_list = [
         # mark block type and check if the operations are supported
         mark_npu_block_type,
+        set_tensor_equivalence,
         supported_operator_check,
         # then do any rewrites of supported operators
         convert_depthwise_to_conv,