# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Neural network graph classes and enums.
# Pass - A packed pass containing one or more Operations.
# CascadedPass - A scheduled pass containing one or more Passes, as well as a scheduling strategy and block
#                configurations.
# Subgraph - Holds a neural network subgraph, pointing at Tensors, Operations, Passes, and CascadedPasses.
# Graph - A full neural network graph with one or more Subgraphs.
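#
# A minimal construction sketch of how these classes nest, matching the
# constructors below (the names `nng`, `sg`, `ps` and `npu_block_type` are
# illustrative only):
#
#     nng = Graph("my_network", batch_size=1)
#     sg = Subgraph("main", PassPlacement.Npu)
#     nng.subgraphs.append(sg)
#     ps = Pass("conv2d_pass", PassPlacement.Npu, False, npu_block_type)
#     sg.passes.append(ps)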
import enum
from typing import List

from .operation import Op
from .shape4d import Shape4D


class PassPlacement(enum.Enum):
    Unknown = 0
    Cpu = 1
    Npu = 2
    MemoryOnly = 3
    StartupInit = 4


class TensorAllocator(enum.Enum):
    LinearAlloc = 1
    Greedy = 2
    HillClimb = 3

    def __str__(self):
        return self.name


class Pass:
    def __init__(self, name, placement, is_element_wise, npu_block_type):
        self.inputs = []
        self.intermediates = []
        self.outputs = []
        self.ops = []
        self.primary_op = None
        self.ifm_tensor = None
        self.ifm2_tensor = None
        self.ofm_tensor = None
        self.weight_tensor = None
        self.scale_tensor = None
        self.lut_tensor = None
        self.name = name
        self.cascade = None
        self.placement = placement
        self.ifm_shapes: List[Shape4D] = []
        self.ofm_shapes: List[Shape4D] = []

        # TODO: rename is_element_wise because it is not the same as an ElementWise operator. It is used by the tensor
        # allocation and requires that the OFM and IFM have the exact same address, i.e. complete overlap.
        self.is_element_wise = is_element_wise
        self.npu_block_type = npu_block_type
        self.block_config = None  # will be filled in by scheduler
        self.shared_buffer = None  # will be filled in by scheduler
        self.scheduling_info = None  # will be filled in by scheduler

        self.predecessors = []
        self.successors = []

    def __str__(self):
        return "<nng.Pass '%s', %s, ops=%s>" % (self.name, self.placement, [op.type for op in self.ops])

    __repr__ = __str__

    def get_primary_op_ifm_weights(self):
        if not self.primary_op:
            return None, None
        # get_ifm_ifm2_weights_ofm() returns (ifm, ifm2, weights, ofm); [::2] keeps (ifm, weights)
        return self.primary_op.get_ifm_ifm2_weights_ofm()[::2]

    def get_primary_op_ifm_ifm2_weights_ofm(self):
        if not self.primary_op:
            return None, None, None, None
        return self.primary_op.get_ifm_ifm2_weights_ofm()

    def get_primary_op_ifm_weights_biases_ofm(self):
        if not self.primary_op:
            return None, None, None, None
        return self.primary_op.get_ifm_weights_biases_ofm()

    def get_primary_op_lut(self):
        if not self.primary_op:
            return None
        return self.primary_op.activation_lut

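# A usage sketch for the primary-op accessors above, assuming `ps` is a Pass
# whose primary_op has been set during pass packing:
#
#     ifm, weights = ps.get_primary_op_ifm_weights()
#     ifm, ifm2, weights, ofm = ps.get_primary_op_ifm_ifm2_weights_ofm()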

class SchedulingStrategy(enum.Enum):
    Unknown = -1
    IfmStream = 0
    WeightStream = 1


class SchedulerRewrite(enum.Enum):
    Nop = 0
    ChangeTensorSubPurpose = 1


class CascadedPass:
    def __init__(self, name, strat, inputs, intermediates, outputs, passes, placement, is_element_wise):
        self.name = name
        self.strategy = strat
        self.inputs = inputs
        self.intermediates = intermediates
        self.outputs = outputs
        self.passes = passes
        self.placement = placement
        self.is_element_wise = is_element_wise

        self.predecessors = []
        self.successors = []
        self.sram_used = 0

    def __str__(self):
        return "<nng.CascadedPass strategy=%s x %s '%s', passes=%s, block_configs=%s>" % (
            self.strategy,
            len(self.passes),
            self.name,
            [ps.name for ps in self.passes],
            [ps.block_config for ps in self.passes],
        )

    __repr__ = __str__


class Subgraph:
    def __init__(self, name="<unnamed>", placement=PassPlacement.Cpu):
        self.output_tensors = []
        self.input_tensors = []
        self.original_inputs = []  # Preserve the original input order
        self.passes = []
        self.cascaded_passes = []
        self.name = name
        self.high_level_command_stream = []
        self.placement = placement
        self.command_stream_tensor = None
        self.flash_tensor = None
        # Scratch information locally used in the scheduler
        self.schedule = None
        self.sched_ops = []

        self.generated_stream_id = None

        self.memory_used = {}
        self.memory_used_per_type = {}

    def __str__(self):
        return "<nng.Subgraph '%s', n_passes=%d, n_cascaded_passes=%d>" % (
            self.name,
            len(self.passes),
            len(self.cascaded_passes),
        )

    __repr__ = __str__

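    # Rebuild every tensor's consumer_list and this subgraph's input_tensors by
    # re-walking the graph from the output tensors and each pass's ops/inputs;
    # a trailing None consumer marks a tensor that the graph itself consumes.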
    def update_consumers(self):
        visit_op_set = set()
        visit_tensor_set = set()
        self.input_tensors = []

        print_visit = False

        def visit_op(op):
            if op in visit_op_set:
                return

            visit_op_set.add(op)
            for inp in op.inputs:
                if not inp:
                    continue
                if print_visit:
                    print(inp, "adding consumer", op)
                visit_tensor(inp)
                inp.consumer_list.append(op)

            if op.type in (Op.Placeholder, Op.SubgraphInput):
                assert len(op.outputs) == 1
                self.input_tensors.append(op.outputs[0])

            for out in op.outputs:
                if out not in visit_tensor_set:
                    out.consumer_list = []  # reset unvisited output, just in case

        def visit_tensor(tens):
            if tens in visit_tensor_set:
                return
            visit_tensor_set.add(tens)
            tens.consumer_list = []
            for op in tens.ops:
                visit_op(op)

        for ps in self.passes:
            for tens in ps.outputs + ps.inputs:
                if not tens:
                    continue
                tens.consumer_list = []  # reset unvisited tensors to start with

        for tens in self.output_tensors:
            visit_tensor(tens)
            tens.consumer_list.append(None)  # special op to indicate that the graph consumes the result

        print_visit = True
        for ps in self.passes:
            for op in ps.ops:
                visit_op(op)
            for tens in ps.inputs:
                visit_tensor(tens)

    def build_pass_links(self):
        for idx, ps in enumerate(self.passes):
            ps.time = 2 * idx
            ps.predecessors = []
            ps.successors = []

        for ps in self.passes:
            for tens in ps.inputs:
                for op in tens.ops:
                    pred_pass = op.scheduled_pass
                    assert pred_pass.time < ps.time
                    if ps not in pred_pass.successors:
                        pred_pass.successors.append(ps)

                    if pred_pass not in ps.predecessors:
                        ps.predecessors.append(pred_pass)

                    assert tens in pred_pass.outputs

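    # Derive an acyclic predecessor relation: a depth-first walk from the
    # terminal passes (those with no successors) keeps a predecessor link only
    # if the predecessor is not already on the current DFS stack
    # (State.BeingVisited). For example, with mutual links A <-> B the walk
    # re-reaches A while it is still being visited, so A is dropped from
    # B.dag_predecessors and the loop is broken.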
    def build_pass_dag_predecessors(self):
        for ps in self.passes:
            ps.dag_predecessors = []

        class State(enum.Enum):
            NotVisited = 0
            BeingVisited = 1
            Visited = 2

        pass_visit_dict = {}

        def visit_pass(ps):
            state = pass_visit_dict.get(ps, State.NotVisited)
            if state == State.Visited:
                return True
            elif state == State.BeingVisited:
                return False  # this is a loop, need to remove this link
            elif state == State.NotVisited:
                pass_visit_dict[ps] = State.BeingVisited

                ps.dag_predecessors = []
                for pred in ps.predecessors:
                    if visit_pass(pred):
                        ps.dag_predecessors.append(pred)

                pass_visit_dict[ps] = State.Visited
                return True

        for ps in self.passes:
            if not ps.successors:
                visit_pass(ps)

    def build_cascaded_pass_links(self):
        for cps in self.cascaded_passes:
            cps.predecessors = []
            cps.successors = []

        for cps in self.cascaded_passes:
            for tens in cps.inputs:
                for op in tens.ops:
                    pred_cpass = op.scheduled_pass.cascade
                    if cps not in pred_cpass.successors:
                        pred_cpass.successors.append(cps)

                    if pred_cpass not in cps.predecessors:
                        cps.predecessors.append(pred_cpass)

                    assert tens in pred_cpass.outputs

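    # Call this after mutating ops or tensors so that consumer links stay in
    # sync. A hedged sketch (the rewrite and `new_tens` are hypothetical):
    #
    #     op.set_output_tensor(new_tens)
    #     sg.refresh_after_modification()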
    def refresh_after_modification(self):
        self.update_consumers()

    def prune_startup_init_pass(self):
        assert len(self.passes) >= 1
        ps = self.passes[0]
        assert ps.placement == PassPlacement.StartupInit

        ps.outputs = [out_tens for out_tens in ps.outputs if len(out_tens.consumers()) > 0]
        ps.ops = [op for op in ps.ops if op.outputs[0] in ps.outputs]

    def get_all_ops(self):
        all_ops = []
        visit_op_set = set()
        visit_tensor_set = set()

        def visit_op(op):
            if op in visit_op_set:
                return
            visit_op_set.add(op)
            for inp in op.inputs:
                visit_tensor(inp)

            all_ops.append(op)

        def visit_tensor(tens):
            if tens is None or tens in visit_tensor_set:
                return
            visit_tensor_set.add(tens)
            for op in tens.ops:
                visit_op(op)

        for tens in self.output_tensors:
            visit_tensor(tens)

        return all_ops

    def print_operators(self):
        print("print_operators()", self.name)
        all_ops = self.get_all_ops()
        unique_ops = []
        for op in all_ops:
            if op.type in (Op.Const, Op.Identity, Op.Placeholder):
                continue

            attrs = op.attrs.copy()
            if op.type in (Op.Conv2D, Op.Conv2DBias, Op.DepthwiseConv2DBias):
                kshape = op.inputs[1].shape
                attrs["kshape"] = [kshape[0], kshape[1]]
            attrs["type"] = op.type.name
            attrs.pop("use_cudnn_on_gpu", None)
            custom_options = attrs.pop("custom_options", None)
            if attrs not in unique_ops:
                unique_ops.append(attrs)
                # print attributes in human readable format
                a = attrs.copy()
                if custom_options is not None:
                    a["custom_options"] = custom_options
                s = a.pop("type")
                data_format = a.pop("data_format", None)
                if data_format and data_format != b"NHWC":
                    s += " " + str(data_format)
                t = a.pop("T", None)
                if t:
                    s += " " + str(t)[9:-2]
                srct = a.pop("SrcT", None)
                if srct:
                    s += " " + str(srct)[9:-2]
                dstt = a.pop("DstT", None)
                if dstt:
                    s += "->" + str(dstt)[9:-2]
                print(s + " " + str(a))

    def print_graph(self, label=None):
        if label:
            print(f"\n[ {label} ]")
        print("print_graph()", self.name)
        all_ops = self.get_all_ops()
        for idx, op in enumerate(all_ops):
            print(idx, op.type, op.name)

    def print_graph_with_tensors(self):
        print("print_graph_with_tensors()", self.name)
        all_ops = self.get_all_ops()
        for idx, op in enumerate(all_ops):
            print(idx, op.type, op.name)
            for idx, tens in enumerate(op.inputs):
                print(
                    "    Input  %02d %20s %20s %20s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.mem_type.name, tens)
                )
            for idx, tens in enumerate(op.outputs):
                print(
                    "    Output %02d %20s %20s %20s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.mem_type.name, tens)
                )
            print()

    def print_graph_with_tensor_quantization(self):
        print("print_graph_with_tensor_quantization()", self.name)
        all_ops = self.get_all_ops()
        for idx, op in enumerate(all_ops):
            print(idx, op.type, op.name)
            for idx, tens in enumerate(op.inputs):
                q = tens.quantization
                if q is None:
                    print("    Input  %02d %10s NO QUANTIZATION INFO %s" % (idx, tens.dtype, tens.name))
                else:
                    print(
                        "    Input  %02d %10s min=%s max=%s scale=%s zero_point=%s %s"
                        % (idx, tens.dtype, q.min, q.max, q.scale_f32, q.zero_point, tens.name)
                    )
            for idx, tens in enumerate(op.outputs):
                q = tens.quantization
                if q is None:
                    print("    Output %02d %10s NO QUANTIZATION INFO %s" % (idx, tens.dtype, tens.name))
                else:
                    print(
                        "    Output %02d %10s min=%s max=%s scale=%s zero_point=%s %s"
                        % (idx, tens.dtype, q.min, q.max, q.scale_f32, q.zero_point, tens.name)
                    )
            print()

    def print_passes(self):
        print("print_passes()", self.name)
        for idx, ps in enumerate(self.passes):
            print("%03d %s" % (idx * 2, ps))

    def print_passes_with_tensors(self):
        print("print_passes_with_tensors()", self.name)
        for idx, ps in enumerate(self.passes):
            print("%3d %s" % (idx * 2, ps))
            for idx, tens in enumerate(ps.inputs):
                print(
                    "    Input        %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            for idx, tens in enumerate(ps.intermediates):
                print(
                    "    Intermediate %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            for idx, tens in enumerate(ps.outputs):
                print(
                    "    Output       %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            print()

    def print_cascaded_passes(self):
        print("print_cascaded_passes()", self.name)
        for idx, ps in enumerate(self.cascaded_passes):
            print("%3d %s SRAM used %.1f KB" % (idx * 2, ps, ps.sram_used / 1024))

    def print_cascaded_passes_with_tensors(self):
        print("print_cascaded_passes_with_tensors()", self.name)
        for idx, ps in enumerate(self.cascaded_passes):
            print("%3d %s SRAM used %.1f KB" % (idx * 2, ps, ps.sram_used / 1024))
            for idx, tens in enumerate(ps.inputs):
                print(
                    "    Input        %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            for idx, tens in enumerate(ps.intermediates):
                print(
                    "    Intermediate %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            for idx, tens in enumerate(ps.outputs):
                print(
                    "    Output       %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            print()

    def print_cascaded_passes_with_tensor_sizes(self):
        print("print_cascaded_passes_with_tensor_sizes()", self.name)
        for idx, ps in enumerate(self.cascaded_passes):
            print("%3d %s SRAM used %.1f KB" % (idx * 2, ps, ps.sram_used / 1024))
            for idx, tens in enumerate(ps.inputs):
                print(
                    "    Input        %2d %7.1f KB %-24s %-15s %-15s %-20s %s"
                    % (
                        idx,
                        tens.storage_size() / 1024,
                        tens.storage_shape,
                        tens.mem_area.name,
                        tens.purpose.name,
                        tens.format.name,
                        tens.name,
                    )
                )
            for idx, tens in enumerate(ps.intermediates):
                print(
                    "    Intermediate %2d %7.1f KB %-24s %-15s %-15s %-20s %s"
                    % (
                        idx,
                        tens.storage_size() / 1024,
                        tens.storage_shape,
                        tens.mem_area.name,
                        tens.purpose.name,
                        tens.format.name,
                        tens.name,
                    )
                )
            for idx, tens in enumerate(ps.outputs):
                print(
                    "    Output       %2d %7.1f KB %-24s %-15s %-15s %-20s %s"
                    % (
                        idx,
                        tens.storage_size() / 1024,
                        tens.storage_shape,
                        tens.mem_area.name,
                        tens.purpose.name,
                        tens.format.name,
                        tens.name,
                    )
                )
            print()

    def print_high_level_command_stream(self):
        print("print_high_level_command_stream()", self.name)
        for idx, cmd in enumerate(self.high_level_command_stream):
            print("%3d %s" % (idx, cmd))


class Graph:
    def __init__(self, name="<unnamed>", batch_size=1):
        self.name = name
        self.batch_size = batch_size
        self.subgraphs = []
        self.metadata = []
        self.memory_used = {}
        self.total_original_weights = 0
        self.total_npu_weights = 0
        self.total_npu_encoded_weights = 0
        self.weight_cache = None  # See CompressedWeightCache

    def get_root_subgraph(self):
        return self.subgraphs[0]

    def prune_startup_init_pass(self):
        for sg in self.subgraphs:
            sg.prune_startup_init_pass()

    def update_consumers(self):
        for sg in self.subgraphs:
            sg.update_consumers()

    def refresh_after_modification(self):
        for sg in self.subgraphs:
            sg.refresh_after_modification()

    def print_operators(self):
        for sg in self.subgraphs:
            sg.print_operators()

    def print_graph(self, label=None):
        for sg in self.subgraphs:
            sg.print_graph(label)

    def print_graph_with_tensors(self):
        for sg in self.subgraphs:
            sg.print_graph_with_tensors()

    def print_graph_with_tensor_quantization(self):
        for sg in self.subgraphs:
            sg.print_graph_with_tensor_quantization()

    def print_passes(self):
        for sg in self.subgraphs:
            sg.print_passes()

    def print_passes_with_tensors(self):
        for sg in self.subgraphs:
            sg.print_passes_with_tensors()

    def print_cascaded_passes(self):
        for sg in self.subgraphs:
            sg.print_cascaded_passes()

    def print_cascaded_passes_with_tensors(self):
        for sg in self.subgraphs:
            sg.print_cascaded_passes_with_tensors()

    def print_cascaded_passes_with_tensor_sizes(self):
        for sg in self.subgraphs:
            sg.print_cascaded_passes_with_tensor_sizes()

    def print_high_level_command_stream(self):
        for sg in self.subgraphs:
            sg.print_high_level_command_stream()
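

# Debug usage sketch, assuming `nng` is a populated Graph (for example, one
# produced by the compiler driver); each call fans out over all subgraphs:
#
#     nng.print_operators()
#     nng.print_passes()
#     nng.print_cascaded_passes()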