MLBEDSW-2306 Added more supported mem-cfgs
Additional supported memory configurations:
- Permanent_storage = DRAM
- Tensor arena in either DRAM or SRAM
Signed-off-by: Patrik Gustavsson <patrik.gustavsson@arm.com>
Change-Id: I20beb7151e306bfdba540e7c0b2a7b478b4d94e1
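
For illustration, below is a minimal sketch of the per-type accounting that the new memory_used_per_type field enables. The MemType member names and the storage_size attribute are assumptions made for this example, not taken from the patch.

from enum import Enum


class MemType(Enum):
    # Hypothetical member names for this sketch; the real enum lives in Vela.
    Permanent_NPU = 1  # e.g. weights held in permanent storage (DRAM)
    Scratch = 2        # e.g. the tensor arena (DRAM or SRAM)


def memory_used_per_type(tensors):
    # Accumulate storage bytes keyed by each tensor's memory type,
    # mirroring the shape of the new Subgraph.memory_used_per_type dict.
    usage = {}
    for tens in tensors:
        # mem_type and storage_size stand in for tensor attributes read by
        # this patch; treat them as illustrative, not as Vela's exact API.
        usage[tens.mem_type] = usage.get(tens.mem_type, 0) + tens.storage_size
    return usage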
diff --git a/ethosu/vela/nn_graph.py b/ethosu/vela/nn_graph.py
index ea35c08..247e6cc 100644
--- a/ethosu/vela/nn_graph.py
+++ b/ethosu/vela/nn_graph.py
@@ -137,6 +137,7 @@
         self.flash_tensor = None
         self.memory_used = {}
+        self.memory_used_per_type = {}

     def __str__(self):
         return "<nng.Subgraph '%s', n_passes=%d, n_cascaded_passes=%d>" % (
@@ -349,9 +350,15 @@
         for idx, op in enumerate(all_ops):
             print(idx, op.type, op.name)
             for idx, tens in enumerate(op.inputs):
-                print(" Input %02d %20s %20s %s" % (idx, tens.purpose.name, tens.mem_area.name, tens))
+                print(
+                    " Input %02d %20s %20s %20s %s"
+                    % (idx, tens.purpose.name, tens.mem_area.name, tens.mem_type.name, tens)
+                )
             for idx, tens in enumerate(op.outputs):
-                print(" Output %02d %20s %20s %s" % (idx, tens.purpose.name, tens.mem_area.name, tens))
+                print(
+                    " Output %02d %20s %20s %20s %s"
+                    % (idx, tens.purpose.name, tens.mem_area.name, tens.mem_type.name, tens)
+                )
             print()

     def print_graph_with_tensor_quantization(self):