Small fix for Softmax regression

Rename add_bias_tensor to fixup_bias_tensors and key it off a new
Operation.needs_bias() helper instead of matching on the op type string.
The generated zero-bias tensor keeps its int32 values as quant_values,
since create_const_tensor now stores quant_values as the raw uint8 byte
view of the values. Also replace set((...)) literals with plain tuples
in membership tests.

Signed-off-by: Jacob Bohlin <jacob.bohlin@arm.com>
Change-Id: I287c24725126c169afec779b921e43c3ab26f739
diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py
index 46d26c8..aaccce2 100644
--- a/ethosu/vela/graph_optimiser.py
+++ b/ethosu/vela/graph_optimiser.py
@@ -832,14 +832,15 @@
     return op
 
 
-def add_bias_tensor(op, arch):
-    if ("conv2d" in op.type.lower() or op.type.startswith("FullyConnected")) and not op.inputs[-1]:
-        # Add bias/scale tensor filled with zeros
-        weight_shape = op.inputs[1].shape
-        weight_sets = weight_shape[-1]
-        bias_values = [0] * weight_sets
-        scale_tens = create_const_tensor(op.name + "_bias", [weight_sets], DataType.int32, bias_values)
-        op.set_input_tensor(scale_tens, -1)
+def fixup_bias_tensors(op, arch):
+    if op.needs_bias() and not op.inputs[-1]:
+        # Op has no bias; add a bias tensor filled with zeros
+        nr_biases = op.inputs[1].shape[-1]
+        bias_values = [0] * nr_biases
+        bias_tensor = create_const_tensor(op.name + "_bias", [nr_biases], DataType.int32, bias_values)
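+        # Keep the int32 values as quant_values (create_const_tensor stores raw uint8 bytes)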
+        bias_tensor.quant_values = bias_tensor.values
+        op.set_input_tensor(bias_tensor, -1)
 
     return op
 
@@ -870,7 +870,7 @@
         fixup_elementwise_with_scalars,
         reorder_depthwise_weights,
         fixup_resizebilinear,
-        add_bias_tensor,
+        fixup_bias_tensors,
         convert_mul_max_to_abs_or_lrelu,
         convert_lrelu,
     ]
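
A minimal standalone sketch of what fixup_bias_tensors now generates, with
plain numpy standing in for Vela's Tensor machinery (make_zero_bias and the
weight shape below are illustrative, not part of the patch):

    import numpy as np

    def make_zero_bias(weight_shape, op_name):
        # One bias per output channel, i.e. the last axis of the weights
        nr_biases = weight_shape[-1]
        bias_values = np.zeros(nr_biases, dtype=np.int32)
        return op_name + "_bias", bias_values

    name, bias = make_zero_bias([3, 3, 8, 16], "conv1")
    print(name, bias.dtype, bias.shape)  # conv1_bias int32 (16,)
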
diff --git a/ethosu/vela/operation.py b/ethosu/vela/operation.py
index c1ca3f8..f7a9509 100644
--- a/ethosu/vela/operation.py
+++ b/ethosu/vela/operation.py
@@ -81,12 +81,12 @@
         bias_idx = -1
         ofm_idx = -1
         npu_block_type = self.attrs.get("npu_block_type", NpuBlockType.Default)
-        if npu_block_type in set((NpuBlockType.ConvolutionMxN, NpuBlockType.ConvolutionDepthWise)):
+        if npu_block_type in (NpuBlockType.ConvolutionMxN, NpuBlockType.ConvolutionDepthWise):
             ifm_idx = 0
             weight_idx = 1
             ofm_idx = 0
 
-            if self.type in set(("Conv2DBiasAct", "DepthwiseConv2dBiasAct", "TransposeConvAct")):
+            if self.type in ("Conv2DBiasAct", "DepthwiseConv2dBiasAct", "TransposeConvAct"):
                 if len(self.inputs) >= 3:
                     bias_idx = 2
 
@@ -101,7 +101,7 @@
             weight_idx = 1
             ofm_idx = 0
 
-            if self.type in set(("FullyConnectedAct",)):
+            if self.type == "FullyConnectedAct":
                 if len(self.inputs) >= 3:
                     bias_idx = 2
 
@@ -116,7 +116,7 @@
             ofm_idx = 0
 
             # LeakyRelu, Abs and CLZ have a single IFM
-            if self.type in set(("LeakyRelu", "Abs", "CLZ")):
+            if self.type in ("LeakyRelu", "Abs", "CLZ"):
                 ifm2_idx = -1
 
         elif self.type == "Conv2DBackpropInput":
@@ -124,7 +124,7 @@
             weight_idx = 1
             ofm_idx = 0
 
-        elif self.type in set(("Squeeze", "Reshape", "QuantizedReshape", "ExpandDims")):
+        elif self.type in ("Squeeze", "Reshape", "QuantizedReshape", "ExpandDims"):
             ifm_idx = 0
             ofm_idx = 0
 
@@ -149,7 +149,7 @@
         weight_tensor = None
         ofm_tensor = None
 
-        ifm_idx, ifm2_idx, weight_idx, bias_idx, ofm_idx = self.get_ifm_ifm2_weight_bias_ofm_indices()
+        ifm_idx, ifm2_idx, weight_idx, _, ofm_idx = self.get_ifm_ifm2_weight_bias_ofm_indices()
         if ifm_idx != -1:
             ifm_tensor = self.inputs[ifm_idx]
         if ifm2_idx != -1:
@@ -180,7 +180,7 @@
         return ifm_tensor, weight_tensor, bias_tensor, ofm_tensor
 
     def is_concat_op(self):
-        return self.type in set(("Concat", "ConcatV2", "QuantizedConcat", "ConcatTFLite", "PackReshaped"))
+        return self.type in ("Concat", "ConcatV2", "QuantizedConcat", "ConcatTFLite", "PackReshaped")
 
     def get_concat_inputs_axis(self):
         assert self.is_concat_op()
@@ -215,7 +215,7 @@
         return dilation_h, dilation_w
 
     def is_split_op(self):
-        return self.type in set(("Split", "SplitV", "StridedSlice", "Slice", "UnpackReshaped"))
+        return self.type in ("Split", "SplitV", "StridedSlice", "Slice", "UnpackReshaped")
 
     def get_split_inputs_axis(self):
         assert self.is_split_op()
@@ -324,3 +324,11 @@
     def set_output_tensor(self, tens):
         tens.ops = [self]
         self.outputs = [tens]
+
+    def needs_bias(self):
+        return self.type in (
+            "Conv2DBiasAct",
+            "DepthwiseConv2dBiasAct",
+            "Conv2DBackpropInputSwitchedBias",
+            "FullyConnectedAct",
+        )
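
The set((...)) to tuple rewrites above are behaviour-preserving: the "in"
operator works on both containers, and for a handful of string literals a
tuple avoids constructing a fresh set on every call. A quick illustrative
check (not part of the patch):

    op_types = ("Squeeze", "Reshape", "QuantizedReshape", "ExpandDims")

    # Same membership results, cheaper literal
    assert ("Reshape" in op_types) == ("Reshape" in set(op_types))
    assert ("Conv2D" in op_types) == ("Conv2D" in set(op_types))
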
diff --git a/ethosu/vela/tensor.py b/ethosu/vela/tensor.py
index 3ad9b25..83dc61a 100644
--- a/ethosu/vela/tensor.py
+++ b/ethosu/vela/tensor.py
@@ -232,7 +232,8 @@
     const_tensor.purpose = purpose
     const_tensor.quantization = quantization
     const_tensor.values = np.array(values, dtype=value_dtype)
-    const_tensor.quant_values = const_tensor.values
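+    # Reinterpret the raw value bytes as a uint8 quant_values view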
+    const_tensor.quant_values = np.frombuffer(const_tensor.values.tobytes(), dtype=np.uint8)
     # Operator
     const_op = Operation("Const", name)
     const_op.set_output_tensor(const_tensor)
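
What the new quant_values assignment in create_const_tensor produces can be
seen with a numpy-only illustration (values chosen arbitrarily): the int32
value array is reinterpreted as its raw byte stream, four uint8 entries per
element. This is presumably why fixup_bias_tensors restores the int32 view
for its generated bias tensor.

    import numpy as np

    values = np.array([1, 2], dtype=np.int32)
    quant_values = np.frombuffer(values.tobytes(), dtype=np.uint8)

    print(values)        # [1 2]
    print(quant_values)  # [1 0 0 0 2 0 0 0] on a little-endian host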