MLBEDSW-2868 Refactor separation of scale + bias tensors

Added an option to specify whether a Tensor clone should be treated
as unique or not.

Signed-off-by: Patrik Gustavsson <patrik.gustavsson@arm.com>
Change-Id: Ie51c1a5e84b535380d498b105aa18ccba1c8b27c
diff --git a/ethosu/vela/tensor.py b/ethosu/vela/tensor.py
index 8786d36..49f93cd 100644
--- a/ethosu/vela/tensor.py
+++ b/ethosu/vela/tensor.py
@@ -15,6 +15,7 @@
 # limitations under the License.
 # Description:
 # Internal representation of a Neural Network Tensor.
+import copy
 import enum
 import uuid
 from collections import defaultdict
@@ -392,34 +393,25 @@
             return self.dtype.size_in_bits() / 8
         return self.element_size_bytes
 
-    def clone(self, suffix="_clone"):
-        res = Tensor(self.shape, self.dtype, self.name + suffix)
-        res.storage_shape = list(self.storage_shape)
-        res.bandwidth_shape = list(self.bandwidth_shape)
+    # Returns a copy, renamed to self.name + suffix
+    # The references to Operators will be empty when returned
+    # Depending on set_unique, the copy is shallow or deep
+    # For set_unique==True, a new equivalence_id will be set
+    def clone(self, suffix="_clone", set_unique=False):
+        if set_unique:
+            res = copy.deepcopy(self)
+            res.equivalence_id = uuid.uuid4()
+        else:
+            res = copy.copy(self)
+            res.storage_shape = list(self.storage_shape)
+            res.bandwidth_shape = list(self.bandwidth_shape)
+            if self.quantization is not None:
+                res.quantization = self.quantization.clone()
 
+        res.name = res.name + suffix
         res.ops = []
         res.consumer_list = []
 
-        res.values = self.values
-        res.quant_values = self.quant_values
-        res.mem_area = self.mem_area
-        res.mem_type = self.mem_type
-        res.format = self.format
-        res.purpose = self.purpose
-        res.sub_purpose = self.sub_purpose
-        res.alignment = self.alignment
-        res.bandwidth_compression_scale = self.bandwidth_compression_scale
-        res.storage_rounding_quantum = self.storage_rounding_quantum
-
-        if self.quantization is not None:
-            res.quantization = self.quantization.clone()
-        else:
-            res.quantization = None
-
-        res.resampling_mode = self.resampling_mode
-
-        res.copy_compressed_weight_info(self)
-        res.avoid_NHCWB16 = self.avoid_NHCWB16
         return res
 
     def clone_into_fast_storage(self, arch):
@@ -806,9 +798,6 @@
 
         return True
 
-    def set_random_equivalence_id(self):
-        self.equivalence_id = uuid.uuid4()
-
     def __str__(self):
         return "<nng.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.shape, self.dtype)
 
diff --git a/ethosu/vela/tflite_reader.py b/ethosu/vela/tflite_reader.py
index 82feddd..24f9f87 100644
--- a/ethosu/vela/tflite_reader.py
+++ b/ethosu/vela/tflite_reader.py
@@ -41,9 +41,8 @@
     return s.decode("utf-8")
 
 
-def clone_and_reshape_tensor(src_tens, reorder):
-
-    tens = src_tens.clone("_reshape")
+def clone_and_reshape_tensor(src_tens, reorder, set_unique):
+    tens = src_tens.clone("_reshape", set_unique)
     tens.shape = [src_tens.shape[idx] for idx in reorder]
     tens.bandwidth_shape = tens.shape
     tens.storage_shape = tens.shape
@@ -153,17 +152,16 @@
         if op.type.is_depthwise_conv2d_op() or op.type.is_conv2d_op() or op.type == Op.FullyConnected:
             if inputs[1].values is not None:
                 if op.type == Op.FullyConnected:
-                    inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 0))
+                    inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 0), False)
                 else:
-                    inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 2, 3, 0))
+                    inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 2, 3, 0), False)
             if op.type.needs_bias() and len(inputs) <= op_type.info.indices.biases[0]:
                 # No Bias tensor
                 inputs.append(None)
             if inputs[-1] and inputs[-1].values is not None:
-                inputs[-1] = clone_and_reshape_tensor(inputs[-1], (0,))
                 # Since bias tensor is used for both bias and scale,
-                # set different equivalence_id for all bias tensors
-                inputs[-1].set_random_equivalence_id()
+                # a clone with a unique equivalence_id is needed
+                inputs[-1] = clone_and_reshape_tensor(inputs[-1], (0,), True)
 
         if opt_serializer is not None:
             op.attrs = opt_serializer.deserialize(op_data)