[MLBEDSW-3891] Fix reading back in an ethos-u custom op

Fixed an assertion failure when reading back in an ethos-u custom op.
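
When a previously compiled network is read back, the ethos-u custom op
already carries its scratch and scratch_fast tensors and the network
already holds an OfflineMemoryAllocation metadata entry. This change
adds a ScratchFast tensor purpose with storage mappings, marks the
custom op's scratch_fast input on read-back, keeps scratch tensors out
of permanent storage, and makes the tflite writer skip generating the
OfflineMemoryAllocation metadata when it is already present.

A minimal sketch of the duplicate-metadata check, for illustration only
(the helper name is hypothetical; as in the writer, metadata is a list
of (name, buffer) pairs whose names are byte strings):

    def has_offline_memory_allocation(metadata):
        # Metadata entry names are byte strings in the tflite writer
        return any(name == b"OfflineMemoryAllocation" for name, _ in metadata)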

Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Change-Id: I275ec9187ffead1e96f2522ecbd658328fa4ef69
diff --git a/ethosu/vela/architecture_features.py b/ethosu/vela/architecture_features.py
index 33e7add..f7d5fd6 100644
--- a/ethosu/vela/architecture_features.py
+++ b/ethosu/vela/architecture_features.py
@@ -275,6 +275,8 @@
             TensorPurpose.Weights: self.permanent_storage_mem_area,
             TensorPurpose.FeatureMap: self.feature_map_storage_mem_area,
             TensorPurpose.LUT: self.permanent_storage_mem_area,
+            TensorPurpose.Scratch: self.feature_map_storage_mem_area,
+            TensorPurpose.ScratchFast: self.fast_storage_mem_area,
         }
 
         self.tensor_storage_mem_type = {
@@ -282,6 +284,8 @@
             TensorPurpose.Weights: MemType.Permanent_NPU,
             TensorPurpose.FeatureMap: MemType.Scratch,
             TensorPurpose.LUT: MemType.Scratch,
+            TensorPurpose.Scratch: MemType.Scratch,
+            TensorPurpose.ScratchFast: MemType.Scratch_fast,
         }
 
         self.min_block_sizes = {
diff --git a/ethosu/vela/mark_tensors.py b/ethosu/vela/mark_tensors.py
index 5a47584..114649d 100644
--- a/ethosu/vela/mark_tensors.py
+++ b/ethosu/vela/mark_tensors.py
@@ -24,7 +24,7 @@
 
 
 def get_format(purpose, arch):
-    if purpose in (TensorPurpose.FeatureMap, TensorPurpose.LUT, TensorPurpose.Scratch):
+    if purpose in (TensorPurpose.FeatureMap, TensorPurpose.LUT, TensorPurpose.Scratch, TensorPurpose.ScratchFast):
         fmt = arch.default_feature_map_format
     elif purpose == TensorPurpose.Weights:
         fmt = arch.default_weight_format
@@ -46,7 +46,11 @@
     tens.mem_area = arch.tensor_storage_mem_area[tens.purpose]
     tens.mem_type = arch.tensor_storage_mem_type[tens.purpose]
 
-    if len(tens.ops) == 1 and tens.ops[0].type == Op.Const:
+    if (
+        len(tens.ops) == 1
+        and tens.ops[0].type == Op.Const
+        and purpose not in (TensorPurpose.Scratch, TensorPurpose.ScratchFast)
+    ):
         tens.mem_area = arch.permanent_storage_mem_area  # special case constants, as they must be in permanent storage
         tens.mem_type = MemType.Permanent_NPU
 
@@ -79,6 +83,11 @@
             if scratch_tensor.name.endswith("_scratch"):
                 scratch_tensor.purpose = TensorPurpose.Scratch
 
+        if len(op.inputs) >= 4:
+            scratch_fast_tensor = op.inputs[3]  # should be existing scratch fast tensor
+            if scratch_fast_tensor.name.endswith("_scratch_fast"):
+                scratch_fast_tensor.purpose = TensorPurpose.ScratchFast
+
         if scratch_tensor is None:
             op.error("Scratch tensor not found.")
 
diff --git a/ethosu/vela/tensor.py b/ethosu/vela/tensor.py
index 093e877..fb877ca 100644
--- a/ethosu/vela/tensor.py
+++ b/ethosu/vela/tensor.py
@@ -111,15 +111,20 @@
     Weights = 1
     FeatureMap = 2
     Scratch = 3
-    LUT = 4
-    FSBias = 5
-    Size = 6
+    ScratchFast = 4
+    LUT = 5
+    FSBias = 6
+    Size = 7
 
     def display_name(self) -> str:
-        return ("Unknown", "Weights", "FeatureMap", "Scratch", "LUT", "FastStorageBias", "Size")[self.value]
+        return ("Unknown", "Weights", "FeatureMap", "Scratch", "ScratchFast", "LUT", "FastStorageBias", "Size")[
+            self.value
+        ]
 
     def identifier_name(self) -> str:
-        return ("unknown", "weights", "feature_map", "scratch", "lut", "fast_storage_bias", "size")[self.value]
+        return ("unknown", "weights", "feature_map", "scratch", "scratch_fast", "lut", "fast_storage_bias", "size")[
+            self.value
+        ]
 
     @staticmethod
     def all():
diff --git a/ethosu/vela/tflite_writer.py b/ethosu/vela/tflite_writer.py
index 18905e3..82a063f 100644
--- a/ethosu/vela/tflite_writer.py
+++ b/ethosu/vela/tflite_writer.py
@@ -411,16 +411,19 @@
         subgraph_idx = np.int32(len(self.subgraphs_to_write))  # Only 1 supported currently
         nbr_tensors = np.int32(len(self.tensor_map))
 
-        # An offset of -1 indicates that the tensor will be allocated online by Tensorflow Lite Micro
-        offsets = [np.int32(-1)] * nbr_tensors
+        if not any([name == b"OfflineMemoryAllocation" for name, _ in self.nng.metadata]):
+            # An offset of -1 indicates that the tensor will be allocated online by Tensorflow Lite Micro
+            offsets = [np.int32(-1)] * nbr_tensors
 
-        # Ensure that the order of the offsets match the order of the tensors
-        for tens, idx in self.tensor_map.items():
-            # Set offsets for tensor allocated in Tensor Arena or in the scratch_fast area
-            if tens.mem_type in (MemType.Scratch, MemType.Scratch_fast):
-                offsets[idx] = np.int32(tens.address) if tens.address is not None else np.int32(0)
+            # Ensure that the order of the offsets match the order of the tensors
+            for tens, idx in self.tensor_map.items():
+                # Set offsets for tensor allocated in Tensor Arena or in the scratch_fast area
+                if tens.mem_type in (MemType.Scratch, MemType.Scratch_fast):
+                    offsets[idx] = np.int32(tens.address) if tens.address is not None else np.int32(0)
 
-        self.nng.metadata.append(("OfflineMemoryAllocation", np.array([version, subgraph_idx, nbr_tensors] + offsets)))
+            self.nng.metadata.append(
+                ("OfflineMemoryAllocation", np.array([version, subgraph_idx, nbr_tensors] + offsets))
+            )
 
         metadata_list = []
         for name, buffer in self.nng.metadata: