vela: Change Shape4D mutability usage

 - Removed the requirement to clone shapes when unique values are
   required, by enforcing top-level immutability. This alleviates
   issues with Shapes being unintentionally shared and then mutated
   as if they were value types.
 - Shape4D fields can no longer be assigned to directly; modifying a
   shape now creates a new instance (see the sketch below).
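
Illustrative only - a minimal sketch of the pattern, assuming a frozen
dataclass and an add() method whose (batch, height, width, depth)
argument order is inferred from the npu_performance.py hunk below; the
real Shape4D in vela may be implemented differently:

    from dataclasses import dataclass

    # Sketch, not the actual vela implementation: a frozen dataclass
    # rejects field assignment, so callers must build a new shape
    # instead of mutating one shared via ifm_shapes/ofm_shapes.
    @dataclass(frozen=True)
    class Shape4D:
        batch: int
        height: int
        width: int
        depth: int

        def add(self, batch: int, height: int, width: int, depth: int) -> "Shape4D":
            # Return a new Shape4D with the deltas applied; self is untouched.
            return Shape4D(
                self.batch + batch,
                self.height + height,
                self.width + width,
                self.depth + depth,
            )

    # Usage mirroring the padding hunk, with hypothetical values for
    # explicit_padding = (top, left, bottom, right):
    explicit_padding = (1, 2, 1, 2)
    ifm_tensor_shape = Shape4D(1, 224, 224, 3)
    ifm_tensor_shape = ifm_tensor_shape.add(
        0,
        explicit_padding[0] + explicit_padding[2],  # height += top + bottom
        explicit_padding[1] + explicit_padding[3],  # width  += left + right
        0,
    )
    assert (ifm_tensor_shape.height, ifm_tensor_shape.width) == (226, 228)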

Signed-off-by: Tim Hall <tim.hall@arm.com>
Change-Id: Ic0dbfa349eb0215eabefb4f4e2cf99f12d83699c
diff --git a/ethosu/vela/npu_performance.py b/ethosu/vela/npu_performance.py
index 3acd5e6..5bba3b6 100644
--- a/ethosu/vela/npu_performance.py
+++ b/ethosu/vela/npu_performance.py
@@ -444,8 +444,8 @@
         npu_block_type = primary_op.type.npu_block_type
 
         ifm_tensor, _, weight_tensor, ofm_tensor = ps.get_primary_op_ifm_ifm2_weights_ofm()
-        ifm_tensor_shape = ps.primary_op.ifm_shapes[0].clone()
-        ofm_tensor_shape = ps.primary_op.ofm_shapes[0].clone()
+        ifm_tensor_shape = ps.primary_op.ifm_shapes[0]
+        ofm_tensor_shape = ps.primary_op.ofm_shapes[0]
         ofm_block.width = min(ofm_block.width, ofm_tensor_shape.width)
         ofm_block.height = min(ofm_block.height, ofm_tensor_shape.height)
         ofm_block.depth = min(ofm_block.depth, ofm_tensor_shape.depth)
@@ -480,9 +480,10 @@
 
             batch_size = ifm_tensor_shape.batch
 
-            # add in padding
-            ifm_tensor_shape.height += explicit_padding[0] + explicit_padding[2]  # height += top and bottom
-            ifm_tensor_shape.width += explicit_padding[1] + explicit_padding[3]  # width  += left and right
+            # add in padding, height += top and bottom, width  += left and right
+            ifm_tensor_shape = ifm_tensor_shape.add(
+                0, explicit_padding[0] + explicit_padding[2], explicit_padding[1] + explicit_padding[3], 0
+            )
 
             if npu_block_type != NpuBlockType.Pooling:
                 if npu_block_type == NpuBlockType.ReduceSum: