[MLBEDSW-3813] Fix LSTM operator pass-through

Fixed pass-through of the LSTM operator: track intermediate tensors and the
is_variable flag on tensors, read them from the input TensorFlow Lite file,
and write them (along with optional quantization parameters) back out when
serialising, so LSTM operators survive a read/write round trip intact.

Change-Id: I23140c69ab6cdc83f6bb8129256b4cc6a7c5ffac
Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
diff --git a/ethosu/vela/operation.py b/ethosu/vela/operation.py
index 16431be..e4d11be 100644
--- a/ethosu/vela/operation.py
+++ b/ethosu/vela/operation.py
@@ -407,6 +407,7 @@
         "attrs",
         "inputs",
         "outputs",
+        "intermediates",
         "flops",
         "scheduled_pass",
         "run_on_npu",
@@ -427,6 +428,7 @@
         self.attrs: Dict[str, Any] = {}
         self.inputs: List[Tensor] = []
         self.outputs: List[Tensor] = []
+        self.intermediates: List[Tensor] = []
         self.flops = 0
         self.run_on_npu = True
         # Fused activation function. If not none: operator code.
@@ -453,6 +455,7 @@
         res.attrs = dict(self.attrs)
         res.inputs = list(self.inputs)
         res.outputs = list(self.outputs)
+        res.intermediates = list(self.intermediates)
         res.flops = self.flops
         res.run_on_npu = self.run_on_npu
         res.activation = None if self.activation is None else self.activation.clone()
diff --git a/ethosu/vela/tensor.py b/ethosu/vela/tensor.py
index b7d4307..97885d0 100644
--- a/ethosu/vela/tensor.py
+++ b/ethosu/vela/tensor.py
@@ -341,6 +341,7 @@
         "bandwidth_shape",
         "dtype",
         "name",
+        "is_variable",
         "ops",
         "consumer_list",
         "values",
@@ -378,6 +379,7 @@
         self.bandwidth_shape = shape
         self.dtype = dtype
         self.name = name
+        self.is_variable = False
         self.equivalence_id: UUID = uuid.uuid4()
 
         self.ops: List[Operation] = []
diff --git a/ethosu/vela/tflite_reader.py b/ethosu/vela/tflite_reader.py
index ae99c33..daea1bf 100644
--- a/ethosu/vela/tflite_reader.py
+++ b/ethosu/vela/tflite_reader.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
+# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -109,6 +109,7 @@
         dtype = datatype_map[tens_dtype]
         tens = Tensor(shape, dtype, name)
         quant = tens_data.Quantization()
+        tens.is_variable = tens_data.IsVariable()
 
         tens.quantization = QuantizationParameters()
         if quant is not None:
@@ -144,6 +145,10 @@
         op_type, opt_serializer, custom_code = self.graph.operator_codes[op_data.OpcodeIndex()]
         inputs = [self.tensors[idx] if idx != -1 else None for idx in op_data.InputsAsNumpy()]
         outputs = [self.tensors[idx] if idx != -1 else None for idx in op_data.OutputsAsNumpy()]
+        intermediates = []
+        if op_data.IntermediatesLength():
+            intermediates = [self.tensors[idx] if idx != -1 else None for idx in op_data.IntermediatesAsNumpy()]
+
         name = "unknown_op_name"
         if len(outputs):
             name = outputs[0].name
@@ -151,6 +156,7 @@
         op.op_index = op_index
         op.inputs = inputs
         op.outputs = outputs
+        op.intermediates = intermediates
         for out in op.outputs:
             out.ops = [op]
 
diff --git a/ethosu/vela/tflite_writer.py b/ethosu/vela/tflite_writer.py
index e190a74..687b887 100644
--- a/ethosu/vela/tflite_writer.py
+++ b/ethosu/vela/tflite_writer.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
+# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -203,6 +203,7 @@
     def serialise_quantization_parameters(self, quant):
         builder = self.builder
 
+        qp = None
         min = None
         max = None
         scale = None
@@ -217,16 +218,18 @@
             if quant.zero_point is not None:
                 zero_point = self.write_long_vector(make_vector(quant.zero_point))
 
-        QuantizationParameters.QuantizationParametersStart(builder)
-        if min is not None:
-            QuantizationParameters.QuantizationParametersAddMin(builder, min)
-        if max is not None:
-            QuantizationParameters.QuantizationParametersAddMax(builder, max)
-        if scale is not None:
-            QuantizationParameters.QuantizationParametersAddScale(builder, scale)
-        if zero_point is not None:
-            QuantizationParameters.QuantizationParametersAddZeroPoint(builder, zero_point)
-        return QuantizationParameters.QuantizationParametersEnd(builder)
+            QuantizationParameters.QuantizationParametersStart(builder)
+            if min is not None:
+                QuantizationParameters.QuantizationParametersAddMin(builder, min)
+            if max is not None:
+                QuantizationParameters.QuantizationParametersAddMax(builder, max)
+            if scale is not None:
+                QuantizationParameters.QuantizationParametersAddScale(builder, scale)
+            if zero_point is not None:
+                QuantizationParameters.QuantizationParametersAddZeroPoint(builder, zero_point)
+            qp = QuantizationParameters.QuantizationParametersEnd(builder)
+
+        return qp
 
     def serialise_tensor(self, tens):
         builder = self.builder
@@ -258,7 +261,9 @@
         # Empty buffers should be kept unique for TensorFlow Lite Micro
         Tensor.TensorAddBuffer(builder, buf_id)
         Tensor.TensorAddName(builder, name)
-        Tensor.TensorAddQuantization(builder, quant)
+        if quant is not None:
+            Tensor.TensorAddQuantization(builder, quant)
+        Tensor.TensorAddIsVariable(builder, tens.is_variable)
 
         res = Tensor.TensorEnd(builder)
         return res
@@ -266,10 +271,15 @@
     def serialise_operator(self, op):
         builder = self.builder
 
-        inputs_offset = self.write_int_vector([self.tensor_map[tens] for tens in op.inputs if tens in self.tensor_map])
+        inputs_offset = self.write_int_vector(
+            [self.tensor_map[tens] if tens in self.tensor_map else -1 for tens in op.inputs]
+        )
         outputs_offset = self.write_int_vector(
             [self.tensor_map[tens] for tens in op.outputs if tens in self.tensor_map]
         )
+        intermediates_offset = self.write_int_vector(
+            [self.tensor_map[tens] for tens in op.intermediates if tens in self.tensor_map]
+        )
 
         if op.type == Op.Custom:
             op_idx, tflop, opt_serializer = self.operator_code_map[op.type][op.attrs.get("custom_code", "")]
@@ -300,6 +310,7 @@
         Operator.OperatorAddOpcodeIndex(builder, op_idx)
         Operator.OperatorAddInputs(builder, inputs_offset)
         Operator.OperatorAddOutputs(builder, outputs_offset)
+        Operator.OperatorAddIntermediates(builder, intermediates_offset)
 
         if builtin_opt_offset is not None:
             Operator.OperatorAddBuiltinOptionsType(builder, opt_serializer.builtin_opt_type)
@@ -328,7 +339,7 @@
         # This allows us to serialise tensors which arent attached to any specific ops,
         # e.g. due to an empty graph containing no ops
         for op in all_ops + placeholder_ops:
-            for tens in op.inputs + op.outputs:
+            for tens in op.inputs + op.outputs + op.intermediates:
                 if tens is not None:
                     tensor_set.add(tens)