Small fix for Softmax regression

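In ethosu/vela/operation.py the operator-type checks are tidied up:
membership tests that wrapped tuple literals in set() now use plain
tuples, the single-element FullyConnectedAct check becomes a direct
string comparison, the unused bias index returned by
get_ifm_ifm2_weight_bias_ofm_indices() is discarded into _ at one call
site, and a needs_bias() helper is added for the operator types that
carry a bias input.

A minimal before/after sketch of the membership-test pattern being
cleaned up (op_type and the single_ifm_* names are illustrative and not
taken from the patch; the operator names come from the diff below):

    op_type = "LeakyRelu"

    # Before: set((...)) builds a temporary set on every evaluation.
    single_ifm_old = op_type in set(("LeakyRelu", "Abs", "CLZ"))

    # After: a plain tuple literal gives the same result without the extra set.
    single_ifm_new = op_type in ("LeakyRelu", "Abs", "CLZ")

    assert single_ifm_old == single_ifm_new
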
Signed-off-by: Jacob Bohlin <jacob.bohlin@arm.com>
Change-Id: I287c24725126c169afec779b921e43c3ab26f739
diff --git a/ethosu/vela/operation.py b/ethosu/vela/operation.py
index c1ca3f8..f7a9509 100644
--- a/ethosu/vela/operation.py
+++ b/ethosu/vela/operation.py
@@ -81,12 +81,12 @@
         bias_idx = -1
         ofm_idx = -1
         npu_block_type = self.attrs.get("npu_block_type", NpuBlockType.Default)
-        if npu_block_type in set((NpuBlockType.ConvolutionMxN, NpuBlockType.ConvolutionDepthWise)):
+        if npu_block_type in (NpuBlockType.ConvolutionMxN, NpuBlockType.ConvolutionDepthWise):
             ifm_idx = 0
             weight_idx = 1
             ofm_idx = 0
 
-            if self.type in set(("Conv2DBiasAct", "DepthwiseConv2dBiasAct", "TransposeConvAct")):
+            if self.type in ("Conv2DBiasAct", "DepthwiseConv2dBiasAct", "TransposeConvAct"):
                 if len(self.inputs) >= 3:
                     bias_idx = 2
 
@@ -101,7 +101,7 @@
             weight_idx = 1
             ofm_idx = 0
 
-            if self.type in set(("FullyConnectedAct",)):
+            if self.type == "FullyConnectedAct":
                 if len(self.inputs) >= 3:
                     bias_idx = 2
 
@@ -116,7 +116,7 @@
             ofm_idx = 0
 
             # LeakyRelu, Abs and CLZ have a single IFM
-            if self.type in set(("LeakyRelu", "Abs", "CLZ")):
+            if self.type in ("LeakyRelu", "Abs", "CLZ"):
                 ifm2_idx = -1
 
         elif self.type == "Conv2DBackpropInput":
@@ -124,7 +124,7 @@
             weight_idx = 1
             ofm_idx = 0
 
-        elif self.type in set(("Squeeze", "Reshape", "QuantizedReshape", "ExpandDims")):
+        elif self.type in ("Squeeze", "Reshape", "QuantizedReshape", "ExpandDims"):
             ifm_idx = 0
             ofm_idx = 0
 
@@ -149,7 +149,7 @@
         weight_tensor = None
         ofm_tensor = None
 
-        ifm_idx, ifm2_idx, weight_idx, bias_idx, ofm_idx = self.get_ifm_ifm2_weight_bias_ofm_indices()
+        ifm_idx, ifm2_idx, weight_idx, _, ofm_idx = self.get_ifm_ifm2_weight_bias_ofm_indices()
         if ifm_idx != -1:
             ifm_tensor = self.inputs[ifm_idx]
         if ifm2_idx != -1:
@@ -180,7 +180,7 @@
         return ifm_tensor, weight_tensor, bias_tensor, ofm_tensor
 
     def is_concat_op(self):
-        return self.type in set(("Concat", "ConcatV2", "QuantizedConcat", "ConcatTFLite", "PackReshaped"))
+        return self.type in ("Concat", "ConcatV2", "QuantizedConcat", "ConcatTFLite", "PackReshaped")
 
     def get_concat_inputs_axis(self):
         assert self.is_concat_op()
@@ -215,7 +215,7 @@
         return dilation_h, dilation_w
 
     def is_split_op(self):
-        return self.type in set(("Split", "SplitV", "StridedSlice", "Slice", "UnpackReshaped"))
+        return self.type in ("Split", "SplitV", "StridedSlice", "Slice", "UnpackReshaped")
 
     def get_split_inputs_axis(self):
         assert self.is_split_op()
@@ -324,3 +324,11 @@
     def set_output_tensor(self, tens):
         tens.ops = [self]
         self.outputs = [tens]
+
+    def needs_bias(self):
+        return self.type in (
+            "Conv2DBiasAct",
+            "DepthwiseConv2dBiasAct",
+            "Conv2DBackpropInputSwitchedBias",
+            "FullyConnectedAct",
+        )
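
A minimal sketch of how the new needs_bias() helper might be used when
checking an operator's inputs; OpStub and missing_bias() are stand-ins
for illustration and are not part of this commit:

    class OpStub:
        """Stand-in exposing the same needs_bias() contract as the patched class."""

        def __init__(self, op_type, inputs):
            self.type = op_type
            self.inputs = inputs

        def needs_bias(self):
            # Same operator types as the new needs_bias() added above.
            return self.type in (
                "Conv2DBiasAct",
                "DepthwiseConv2dBiasAct",
                "Conv2DBackpropInputSwitchedBias",
                "FullyConnectedAct",
            )

    def missing_bias(op):
        # Mirrors the indexing above: a biased op type expects a third input
        # tensor (bias_idx == 2), so fewer than three inputs means no bias yet.
        return op.needs_bias() and len(op.inputs) < 3

    assert missing_bias(OpStub("FullyConnectedAct", ["ifm", "weights"]))
    assert not missing_bias(OpStub("LeakyRelu", ["ifm"]))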