MLBEDSW-1716: Transpose Convolution support

Change-Id: Ie6d8d6de9f3447f19ba06aafa9fa480fc96a973b
Signed-off-by: Jacob Bohlin <jacob.bohlin@arm.com>
diff --git a/ethosu/vela/weight_compressor.py b/ethosu/vela/weight_compressor.py
index 9edde60..df2b057 100644
--- a/ethosu/vela/weight_compressor.py
+++ b/ethosu/vela/weight_compressor.py
@@ -234,6 +234,10 @@
         else:
             tens.block_traversal = TensorBlockTraversal.DepthFirst
 
+    if tens.consumer_list[0].type == "Conv2DBackpropInputSwitchedBias":
+        # Transpose Convolution, reverse the weights in the H and W axes
+        weights = np.flip(weights, axis=(0, 1))
+
     # Slice weight stream up depth-ways into bricks and compress
     full_ofm_depth = quant_buf.shape[-1]
     for idx in range(0, full_ofm_depth, ofm_depth_step):
@@ -273,7 +277,9 @@
     # the connected operator should expect a bias input unless it is a FullyConnected
     assert "Bias" in tens.consumer_list[0].type or tens.consumer_list[0].type.startswith("FullyConnected")
     # the input bias tensor is the same as that connected to the operator
-    assert tens is tens.consumer_list[0].inputs[2]
+    _, _, bias_tens, _ = tens.consumer_list[0].get_ifm_weights_biases_ofm()
+    assert tens is bias_tens
+
     # the operator should only have a single output
     assert len(tens.consumer_list[0].outputs) == 1
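
Note (illustrative only, not part of the patch): a minimal numpy sketch of the H/W weight
reversal the first hunk adds for Conv2DBackpropInputSwitchedBias. The HWIO layout
(kernel height, kernel width, ifm depth, ofm depth) is assumed here for the sake of the example.

    import numpy as np

    # Toy HWIO weight array: 2x3 kernel, 1 input channel, 1 output channel.
    weights = np.arange(2 * 3 * 1 * 1).reshape(2, 3, 1, 1)

    # Reverse the kernel along its H and W axes (axes 0 and 1), mirroring the
    # np.flip call the patch applies before the weights are bricked and compressed.
    flipped = np.flip(weights, axis=(0, 1))

    # The top-left element of the original kernel ends up at the bottom-right
    # of the flipped kernel, and vice versa.
    assert flipped[0, 0, 0, 0] == weights[-1, -1, 0, 0]
    assert flipped[-1, -1, 0, 0] == weights[0, 0, 0, 0]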