[MLBEDSW-2787] Remove op.attrs["rescale"] in softmax.py

Added a RescaleAdd operation to avoid the non-standard "rescale"
attribute on the Add operation. ResizeBilinear was changed in the
same way, so the rescale is now carried as an explicit field on the
operation (op.rescale) instead of in op.attrs.
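
A minimal sketch of the intended usage (not taken from softmax.py;
the helper name and the (scale, shift) form of the rescale value are
assumptions, only Op.RescaleAdd and the op.rescale field come from
this change):

    # Sketch: build a RescaleAdd op whose rescale is an explicit
    # field rather than a non-standard entry in op.attrs.
    from .operation import Op, Operation

    def create_rescale_add(name, ifm, ifm2, ofm, scale, shift):
        op = Operation(Op.RescaleAdd, name)
        op.add_input_tensor(ifm)
        op.add_input_tensor(ifm2)
        op.set_output_tensor(ofm)
        # Hypothetical (scale, shift) pair; replaces op.attrs["rescale"]
        op.rescale = (scale, shift)
        return op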

Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Change-Id: I1d286f63890585c06b8a161df1ff77e3f844a4b9
diff --git a/ethosu/vela/high_level_command_to_npu_op.py b/ethosu/vela/high_level_command_to_npu_op.py
index 0711702..8e4d33a 100644
--- a/ethosu/vela/high_level_command_to_npu_op.py
+++ b/ethosu/vela/high_level_command_to_npu_op.py
@@ -91,6 +91,7 @@
 elementwise_op_map = {
     Op.Mul: NpuElementWiseOp.MUL,
     Op.Add: NpuElementWiseOp.ADD,
+    Op.RescaleAdd: NpuElementWiseOp.ADD,
     Op.Sub: NpuElementWiseOp.SUB,
     Op.Minimum: NpuElementWiseOp.MIN,
     Op.Maximum: NpuElementWiseOp.MAX,
@@ -386,8 +387,8 @@
     npu_op = NpuPoolingOperation(pool_op)
     set_common_op_fields(npu_op, cmd, arch)
     # Pooling specific info
-    if op.type == Op.ResizeBilinear and "rescale" in op.attrs:
-        npu_op.rescale = op.attrs["rescale"]
+    if op.type == Op.ResizeBilinear:
+        npu_op.rescale = op.rescale
     return npu_op
 
 
@@ -426,8 +427,9 @@
         output_scale = npu_op.ifm2.quantization.scale_f32
     if op.type == Op.LeakyRelu:
         output_scale = op.attrs["alpha"]
-    if op.type in (Op.Add, Op.Sub) and "rescale" in op.attrs:
-        npu_op.rescale = op.attrs.get("rescale")
+    if op.type == Op.RescaleAdd:
+        assert op.rescale is not None, f"{op.type} must have rescale"
+        npu_op.rescale = op.rescale
     if op.type in (Op.Add, Op.Mul, Op.Sub):
         if op.activation is not None and op.activation.op_type in (Op.Sigmoid, Op.Tanh):
             output_scale = 1 / 0x3000