[MLBEDSW-3813] Fix LSTM operator pass-through

Fixed pass-through of the LSTM operator by tracking intermediate tensors on Operation (new `intermediates` list, initialised in `__init__` and copied on clone).

Change-Id: I23140c69ab6cdc83f6bb8129256b4cc6a7c5ffac
Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
diff --git a/ethosu/vela/operation.py b/ethosu/vela/operation.py
index 16431be..e4d11be 100644
--- a/ethosu/vela/operation.py
+++ b/ethosu/vela/operation.py
@@ -407,6 +407,7 @@
         "attrs",
         "inputs",
         "outputs",
+        "intermediates",
         "flops",
         "scheduled_pass",
         "run_on_npu",
@@ -427,6 +428,7 @@
         self.attrs: Dict[str, Any] = {}
         self.inputs: List[Tensor] = []
         self.outputs: List[Tensor] = []
+        self.intermediates: List[Tensor] = []
         self.flops = 0
         self.run_on_npu = True
         # Fused activation function. If not none: operator code.
@@ -453,6 +455,7 @@
         res.attrs = dict(self.attrs)
         res.inputs = list(self.inputs)
         res.outputs = list(self.outputs)
+        res.intermediates = list(self.intermediates)
         res.flops = self.flops
         res.run_on_npu = self.run_on_npu
         res.activation = None if self.activation is None else self.activation.clone()