MLBEDSW-6969 Remove RescaleAdd and RescaleMul operators
Removed RescaleAdd and RescaleMul operators in favour of
Operation.explicit_scaling and removed Operation.rescale.
Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Change-Id: Idccd8851731d4bb8d4e84970e0fd6b409d7d4e45
diff --git a/ethosu/vela/softmax.py b/ethosu/vela/softmax.py
index 9565bc5..1655427 100644
--- a/ethosu/vela/softmax.py
+++ b/ethosu/vela/softmax.py
@@ -28,6 +28,7 @@
from .data_type import DataType
from .debug_database import DebugDatabase
from .operation import ActivationFunction
+from .operation import ExplicitScaling
from .operation import Op
from .operation import Operation
from .operation_util import create_add
@@ -35,7 +36,6 @@
from .operation_util import create_depthwise_maxpool
from .operation_util import create_mul
from .operation_util import create_reduce_sum
-from .operation_util import create_rescale_add
from .operation_util import create_shl
from .operation_util import create_shr
from .operation_util import create_sub
@@ -351,16 +351,15 @@
f0_one_const = create_const_tensor(
"F0_one_const", [1, 1, 1, 1], DataType.int32, [(1 << 31) - 1], np.int32, quantization=no_scale_quant
)
- half_denominator = add_op_get_ofm(
- create_rescale_add(
- f"{self.op.name}_add{pass_number}",
- f0_one_const,
- shifted_sum_minus_one,
- (1, 1), # Custom rescale
- one_scale_quant,
- activation,
- )
+ add_op = create_add(
+ f"{self.op.name}_add{pass_number}",
+ f0_one_const,
+ shifted_sum_minus_one,
+ one_scale_quant,
+ activation,
)
+ add_op.explicit_scaling = ExplicitScaling(False, shift=[1], multiplier=[1]) # Custom rescale
+ half_denominator = add_op_get_ofm(add_op)
# PASS 11 - Multiply
neg_32_over_17 = create_const_tensor(