[MLBEDSW-2846] Do not use NHCWB16 for reduce_sum int32

Added checks to prevent NHCWB16 from being used for int32 reduce_sum,
which makes int8/uint8 softmax work.
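
A minimal, self-contained sketch of the kind of check described above;
the class and field names are illustrative stand-ins, not Vela's real
API:

    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class Op:
        type: str

    @dataclass
    class Tensor:
        dtype: str                          # e.g. "int32", "int8"
        consumers: List[Op] = field(default_factory=list)
        producers: List[Op] = field(default_factory=list)

    def can_use_nhcwb16(tensor: Tensor) -> bool:
        """Return False when an int32 tensor is produced or consumed by a
        ReduceSum, forcing it to stay in plain NHWC layout."""
        if tensor.dtype != "int32":
            return True
        return not any(op.type == "ReduceSum"
                       for op in tensor.consumers + tensor.producers)

    # The int32 intermediate feeding a ReduceSum must not be stored as
    # NHCWB16, which is what lets the int8/uint8 softmax lowering work.
    assert not can_use_nhcwb16(Tensor("int32", consumers=[Op("ReduceSum")]))
    assert can_use_nhcwb16(Tensor("int8", consumers=[Op("ReduceSum")]))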

Also enabled the softmax graph rewrite by default and fixed a saturation
problem.
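
For illustration only, the saturation issue referred to here is the kind
of problem addressed by clamping intermediate values to the representable
range before narrowing, instead of letting the cast wrap around (numpy is
used purely as an example, not Vela's implementation):

    import numpy as np

    def saturating_narrow_to_int16(x: np.ndarray) -> np.ndarray:
        # Clamp int32 intermediates into the int16 range so large values
        # saturate instead of overflowing on the cast.
        return np.clip(x, -32768, 32767).astype(np.int16)

    print(saturating_narrow_to_int16(
        np.array([40000, -40000, 123], dtype=np.int32)))
    # -> [ 32767 -32768    123]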

Change-Id: Ic01bd9ece7e5c3edb2900b7915cc747efe9e5760
Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index 567c05c..f57cbee 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -22,8 +22,7 @@
 
 
 class SupportedOperators:
-    def __init__(self, softmax_support):
-        self.softmax_support = softmax_support
+    def __init__(self):
         # Categorised lists of supported operators
         self.npu_pre_ops = set(("QuantizedResizeBilinear", "SplitSliceRead",))
         self.convolution_ops = set(("Conv2DBiasAct", "Conv2D", "QuantizedConv2D",))
@@ -393,9 +392,6 @@
 
     def check_activation_ops(self, op):
         if op.type == "Softmax":
-            if not self.softmax_support:
-                return False
-
             ifm_tensor = op.inputs[0]
             ofm_tensor = op.outputs[0]