MLBEDSW-2574: Fix issue with different bias tensor scaling

 - In networks that share the scale & bias tensor between operators,
   differences in operator quantization cause conflicting HW packed
   scale & bias values for the tensor. This commit replicates the
   scale and bias tensors per operator, similar to weights handling,
   to avoid this conflict.

Signed-off-by: Tim Hall <tim.hall@arm.com>
Change-Id: Idee1fdf222ec849b6659adb0891b331d162524b7
diff --git a/ethosu/vela/tflite_reader.py b/ethosu/vela/tflite_reader.py
index be47cb1..9346b76 100644
--- a/ethosu/vela/tflite_reader.py
+++ b/ethosu/vela/tflite_reader.py
@@ -157,9 +157,11 @@
 
         if op_type.startswith("DepthwiseConv2d") or op_type.startswith("Conv2D"):
             inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 2, 3, 0))
+            inputs[2] = clone_and_reshape_tensor(inputs[2], (0,))
 
         if op_type.startswith("FullyConnected"):
             inputs[1] = clone_and_reshape_tensor(inputs[1], (1, 0))
+            inputs[2] = clone_and_reshape_tensor(inputs[2], (0,))
 
         if opt_serializer is not None:
             op.attrs = opt_serializer.deserialize(op_data)