MLBEDSW-7572 Update LSTM with new constant precision
Updated the Q0_15_SCALE constant to the exact power-of-two value (2**-15)
used in the reference, replacing the truncated decimal literal, and reused
the constant in place of duplicated inline 2**-15 scale values.
Change-Id: Id680748c532d41fea9760ec76c0b65c0c3e73a13
Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
diff --git a/ethosu/vela/lstm.py b/ethosu/vela/lstm.py
index 5a50788..ecec58f 100644
--- a/ethosu/vela/lstm.py
+++ b/ethosu/vela/lstm.py
@@ -37,7 +37,7 @@
from .tensor import QuantizationParameters
from .tensor import Tensor
-Q0_15_SCALE = np.float32(0.00003051757)
+Q0_15_SCALE = np.float32(2**-15)
"""Q0.15 scale like the reference defines it"""
@@ -248,7 +248,7 @@
re_fc.ofm.dtype = DataType.int16
# Setup add quantization
q_add = q_fc.clone()
- q_add.scale_f32 = np.float32(2**-15)
+ q_add.scale_f32 = Q0_15_SCALE
# Create add + activation
add = create_add(f"{name}_add", in_fc.ofm, re_fc.ofm, q_add, ActivationFunction(activation))
if activation is Op.Sigmoid:
@@ -309,7 +309,7 @@
base_name = f"output_state#{batch}.{time}"
# Setup tanh quantization
q_out_tanh = QuantizationParameters()
- q_out_tanh.scale_f32 = np.float32(2**-15)
+ q_out_tanh.scale_f32 = Q0_15_SCALE
q_out_tanh.zero_point = 0
# Create tanh(cell state)
tanh = create_fused_activation(Op.Tanh, f"{base_name}_tanh", cell_state, q_out_tanh)