MLBEDSW-2847: Fix for TransposeConv crash and u8 output diff

Clamp the calculated right/bottom padding for upscaled (TransposeConv)
convolutions so it can never go negative, make the Conv2d type check in
add_bias_tensor case-insensitive so all conv variants get a zero bias/scale
tensor, and keep quant_values of generated const tensors in their original
dtype instead of reinterpreting the raw bytes as uint8.

Signed-off-by: Jacob Bohlin <jacob.bohlin@arm.com>
Change-Id: I2cb3f6639e4bb8a984fa3647ee7b4678ed6f5890
diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py
index 9f92e75..b9aafca 100644
--- a/ethosu/vela/graph_optimiser.py
+++ b/ethosu/vela/graph_optimiser.py
@@ -156,8 +156,8 @@
         ypad = needed_total_padding(int(input_dims[1]) * upscaling_factor, int(stride[1]), int(kernel_height))
         xpad = needed_total_padding(int(input_dims[2]) * upscaling_factor, int(stride[2]), int(kernel_width))
 
-        right_pad = ((xpad + 1) // upscaling_factor) - 1
-        bottom_pad = ((ypad + 1) // upscaling_factor) - 1
+        right_pad = max(((xpad + 1) // upscaling_factor) - 1, 0)
+        bottom_pad = max(((ypad + 1) // upscaling_factor) - 1, 0)
         left_pad = max(kernel_width - 1 - right_pad, 0)
         top_pad = max(kernel_height - 1 - bottom_pad, 0)
 
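For context on the hunk above: with a kernel smaller than the upscaling factor the old formula could yield a negative right/bottom pad, which the added max(..., 0) clamps away. A minimal standalone sketch, assuming a conventional SAME-style needed_total_padding helper (the real Vela helper may differ):

```python
# Standalone sketch, not Vela code: reproduces the explicit-padding arithmetic
# above. needed_total_padding is an assumed SAME-style helper used only to
# produce plausible xpad/ypad values.


def needed_total_padding(input_size, stride, filter_size):
    # Total padding so that ceil(input_size / stride) output positions are
    # fully covered by the filter.
    out_size = (input_size + stride - 1) // stride
    needed_input = (out_size - 1) * stride + filter_size
    return max(needed_input - input_size, 0)


def explicit_padding(input_size, stride, kernel, upscaling_factor, clamp):
    pad = needed_total_padding(input_size * upscaling_factor, stride, kernel)
    trailing = ((pad + 1) // upscaling_factor) - 1   # right/bottom pad
    if clamp:
        trailing = max(trailing, 0)                  # the fix above
    leading = max(kernel - 1 - trailing, 0)          # left/top pad
    return leading, trailing


# 1x1 kernel, stride 1, 2x upscaling: pad == 0, so the unclamped value is
# ((0 + 1) // 2) - 1 == -1, i.e. a negative right/bottom pad.
print(explicit_padding(8, 1, 1, 2, clamp=False))  # (1, -1)
print(explicit_padding(8, 1, 1, 2, clamp=True))   # (0, 0)
```
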
@@ -845,7 +845,7 @@
 
 
 def add_bias_tensor(op, arch):
-    if ("Conv2d" in op.type or op.type.startswith("FullyConnected")) and not op.inputs[-1]:
+    if ("conv2d" in op.type.lower() or op.type.startswith("FullyConnected")) and not op.inputs[-1]:
         # Add bias/scale tensor filled with zeros
         weight_shape = op.inputs[1].shape
         weight_sets = weight_shape[-1]
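To show what the case-insensitive check changes, a small sketch; the op type strings below are illustrative assumptions rather than Vela's exact internal names:

```python
# Illustration of the type check in add_bias_tensor; the op type strings are
# assumptions for demonstration only.

op_types = ["Conv2DBiasAct", "Conv2DBackpropInput", "DepthwiseConv2dBiasAct"]

for op_type in op_types:
    old_check = "Conv2d" in op_type           # exact-case substring match
    new_check = "conv2d" in op_type.lower()   # case-insensitive match
    print(f"{op_type:30} old={old_check!s:5} new={new_check}")

# The exact-case check misses any type spelled with a capital "D" ("Conv2D"),
# so those ops were not given the zero bias/scale tensor by this function.
```
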
diff --git a/ethosu/vela/tensor.py b/ethosu/vela/tensor.py
index f0e7ea4..d4f6a40 100644
--- a/ethosu/vela/tensor.py
+++ b/ethosu/vela/tensor.py
@@ -232,7 +232,7 @@
     const_tensor.purpose = purpose
     const_tensor.quantization = quantization
     const_tensor.values = np.array(values, dtype=value_dtype)
-    const_tensor.quant_values = np.frombuffer(const_tensor.values.tobytes(), dtype=np.uint8)
+    const_tensor.quant_values = const_tensor.values
     # Operator
     const_op = Operation("Const", name)
     const_op.set_output_tensor(const_tensor)
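
Why the quant_values change matters, sketched with plain numpy for a non-uint8 constant: np.frombuffer on the raw bytes only preserves the values when the dtype really is uint8, while the direct assignment keeps the array and its dtype intact.

```python
import numpy as np

values = np.array([1, -2, 300], dtype=np.int32)

# Old behaviour: reinterpret the raw bytes as uint8. On a little-endian host
# this yields 12 elements, none of which equal the original numbers.
as_uint8 = np.frombuffer(values.tobytes(), dtype=np.uint8)
print(as_uint8)       # [  1   0   0   0 254 255 255 255  44   1   0   0]

# New behaviour: keep the array, and its dtype, as-is.
quant_values = values
print(quant_values)   # [  1  -2 300]
```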