# SPDX-FileCopyrightText: Copyright 2020-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Neural network graph classes and enums.
# Pass - A packed pass containing one or more Operations.
# CascadedPass - A scheduled pass containing one or more Passes, as well as a scheduling strategy and block
#                configurations.
# Subgraph - Holds a neural network subgraph, pointing at Tensors, Operations, Passes, and CascadedPasses.
# Graph - A full neural network graph with one or more Subgraphs.
import enum
from typing import List

from .operation import Op
from .shape4d import Shape4D


class PassPlacement(enum.Enum):
    Unknown = 0
    Cpu = 1
    Npu = 2
    MemoryOnly = 3
    StartupInit = 4


class TensorAllocator(enum.Enum):
    LinearAlloc = 1
    Greedy = 2
    HillClimb = 3

    def __str__(self):
        return self.name


class NetworkType(enum.Enum):
    TFLite = 1
    TOSA = 2


class Pass:
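    """A packed pass: one or more Operations executed as a unit.

    Tensor and shape fields are filled in by pass packing; block_config,
    shared_buffer and scheduling_info are filled in by the scheduler.
    """
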
    def __init__(self, name, placement, is_element_wise, npu_block_type):
        self.inputs = []
        self.intermediates = []
        self.outputs = []
        self.ops = []
        self.primary_op = None
        self.ifm_tensor = None
        self.ifm2_tensor = None
        self.ofm_tensor = None
        self.weight_tensor = None
        self.scale_tensor = None
        self.lut_tensor = None
        self.name = name
        self.cascade = None
        self.placement = placement
        self.ifm_shapes: List[Shape4D] = []
        self.ofm_shapes: List[Shape4D] = []

        # TODO: rename is_element_wise because it is not the same as an ElementWise operator. It is used by the tensor
        # allocation and requires that the OFM and IFM have the exact same address. Essentially complete overlap.
        self.is_element_wise = is_element_wise
        self.npu_block_type = npu_block_type
        self.block_config = None  # will be filled in by scheduler
        self.shared_buffer = None  # will be filled in by scheduler
        self.scheduling_info = None  # will be filled in by scheduler

        self.predecessors = []
        self.successors = []

    def __str__(self):
        return "<nng.Pass '%s', %s, ops=%s>" % (self.name, self.placement, [op.type for op in self.ops])

    __repr__ = __str__

    def get_primary_op_ifm_weights(self):
        if not self.primary_op:
            return None, None
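        # get_ifm_ifm2_weights_ofm() returns (ifm, ifm2, weights, ofm);
        # slicing with [::2] keeps elements 0 and 2, i.e. (ifm, weights)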
        return self.primary_op.get_ifm_ifm2_weights_ofm()[::2]

    def get_primary_op_ifm_ifm2_weights_ofm(self):
        if not self.primary_op:
            return None, None, None, None
        return self.primary_op.get_ifm_ifm2_weights_ofm()

    def get_primary_op_ifm_weights_biases_ofm(self):
        if not self.primary_op:
            return None, None, None, None
        return self.primary_op.get_ifm_weights_biases_ofm()

    def get_primary_op_lut(self):
        if not self.primary_op:
            return None
        return self.primary_op.activation_lut


class SchedulingStrategy(enum.Enum):
    Unknown = -1
    IfmStream = 0
    WeightStream = 1


class SchedulerRewrite(enum.Enum):
    Nop = 0
    ChangeTensorSubPurpose = 1


class CascadedPass:
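    """A scheduled pass: a group of Passes run under one SchedulingStrategy.

    sram_used and time are filled in by the scheduler.
    """
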
    def __init__(self, name, strat, inputs, intermediates, outputs, passes, placement, is_element_wise):
        self.name = name
        self.strategy = strat
        self.inputs = inputs
        self.intermediates = intermediates
        self.outputs = outputs
        self.passes = passes
        self.placement = placement
        self.is_element_wise = is_element_wise

        self.predecessors = []
        self.successors = []
        self.sram_used = 0
        self.time = 0

    def __str__(self):
        return "<nng.CascadedPass strategy=%s x %s '%s', passes=%s, block_configs=%s>" % (
            self.strategy,
            len(self.passes),
            self.name,
            [ps.name for ps in self.passes],
            [ps.block_config for ps in self.passes],
        )

    __repr__ = __str__


class Subgraph:
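    """Holds a neural network subgraph, pointing at Tensors, Operations, Passes and CascadedPasses."""
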
    def __init__(self, name="<unnamed>", placement=PassPlacement.Cpu):
        self.output_tensors = []
        self.input_tensors = []
        self.original_inputs = []  # Preserve the original input order
        self.passes = []
        self.cascaded_passes = []
        self.name = name
        self.high_level_command_stream = []
        self.placement = placement
        self.command_stream_tensor = None
        self.flash_tensor = None
        # Scratch information locally used in the scheduler
        self.schedule = None
        self.sched_ops = []

        self.generated_stream_id = None

        self.memory_used = {}
        self.memory_used_per_type = {}

    def __str__(self):
        return "<nng.Subgraph '%s', n_passes=%d, n_cascaded_passes=%d>" % (
            self.name,
            len(self.passes),
            len(self.cascaded_passes),
        )

    __repr__ = __str__

    def update_consumers(self):
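        """Rebuild every tensor's consumer_list (and this subgraph's input_tensors) by walking back from output_tensors."""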
        visit_op_set = set()
        visit_tensor_set = set()
        self.input_tensors = []

        print_visit = False

        def visit_op(op):
            if op in visit_op_set:
                return

            visit_op_set.add(op)
            for inp in op.inputs:
                if not inp:
                    continue
                if print_visit:
                    print(inp, "adding consumer", op)
                visit_tensor(inp)
                inp.consumer_list.append(op)

            if op.type in (Op.Placeholder, Op.SubgraphInput):
                assert len(op.outputs) == 1
                self.input_tensors.append(op.outputs[0])

            for out in op.outputs:
                if out not in visit_tensor_set:
                    out.consumer_list = []  # reset unvisited output, just in case

        def visit_tensor(tens):
            if tens in visit_tensor_set:
                return
            visit_tensor_set.add(tens)
            tens.consumer_list = []
            for op in tens.ops:
                visit_op(op)

        for ps in self.passes:
            for tens in ps.outputs + ps.inputs:
                if not tens:
                    continue
                tens.consumer_list = []  # reset unvisited tensors to start with

        for tens in self.output_tensors:
            visit_tensor(tens)
            tens.consumer_list.append(None)  # special op to indicate that the graph consumes the result

        print_visit = True
        for ps in self.passes:
            for op in ps.ops:
                visit_op(op)
            for tens in ps.inputs:
                visit_tensor(tens)

    def build_pass_links(self):
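        """Give each pass an even time stamp in list order and link predecessor/successor passes via their tensors."""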
        for idx, ps in enumerate(self.passes):
            ps.time = 2 * idx
            ps.predecessors = []
            ps.successors = []

        for ps in self.passes:
            for tens in ps.inputs:
                for op in tens.ops:
                    pred_pass = op.scheduled_pass
                    assert pred_pass.time < ps.time
                    if ps not in pred_pass.successors:
                        pred_pass.successors.append(ps)

                    if pred_pass not in ps.predecessors:
                        ps.predecessors.append(pred_pass)

                    assert tens in pred_pass.outputs

    def build_pass_dag_predecessors(self):
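        """Populate dag_predecessors: the predecessor links with cycle-forming edges dropped, using a three-state DFS."""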
        for ps in self.passes:
            ps.dag_predecessors = []

        class State(enum.Enum):
            NotVisited = 0
            BeingVisited = 1
            Visited = 2

        pass_visit_dict = {}

        def visit_pass(ps):
            state = pass_visit_dict.get(ps, State.NotVisited)
            if state == State.Visited:
                return True
            elif state == State.BeingVisited:
                return False  # this is a loop, need to remove this link
            elif state == State.NotVisited:
                pass_visit_dict[ps] = State.BeingVisited

            ps.dag_predecessors = []
            for pred in ps.predecessors:
                if visit_pass(pred):
                    ps.dag_predecessors.append(pred)

            pass_visit_dict[ps] = State.Visited
            return True

        for ps in self.passes:
            if not ps.successors:
                visit_pass(ps)

    def build_cascaded_pass_links(self):
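        """Link predecessor/successor CascadedPasses, mirroring build_pass_links at the cascaded level."""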
        for cps in self.cascaded_passes:
            cps.predecessors = []
            cps.successors = []

        for cps in self.cascaded_passes:
            for tens in cps.inputs:
                for op in tens.ops:
                    pred_cpass = op.scheduled_pass.cascade
                    if cps not in pred_cpass.successors:
                        pred_cpass.successors.append(cps)

                    if pred_cpass not in cps.predecessors:
                        cps.predecessors.append(pred_cpass)

                    assert tens in pred_cpass.outputs

    def refresh_after_modification(self):
        self.update_consumers()

    def prune_startup_init_pass(self):
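        """Remove StartupInit outputs that no longer have consumers, along with the ops that produced them."""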
        assert len(self.passes) >= 1
        ps = self.passes[0]
        assert ps.placement == PassPlacement.StartupInit

        ps.outputs = [out_tens for out_tens in ps.outputs if len(out_tens.consumers()) > 0]
        ps.ops = [op for op in ps.ops if op.outputs[0] in ps.outputs]

    def get_all_ops(self):
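        """Return every op reachable from output_tensors, in depth-first post-order (producers before consumers)."""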
        all_ops = []
        visit_op_set = set()
        visit_tensor_set = set()

        def visit_op(op):
            if op in visit_op_set:
                return
            visit_op_set.add(op)
            for inp in op.inputs:
                visit_tensor(inp)

            all_ops.append(op)

        def visit_tensor(tens):
            if tens is None or tens in visit_tensor_set:
                return
            visit_tensor_set.add(tens)
            for op in tens.ops:
                visit_op(op)

        for tens in self.output_tensors:
            visit_tensor(tens)

        return all_ops

    def print_operators(self):
        print("print_operators()", self.name)
        all_ops = self.get_all_ops()
        unique_ops = []
        for op in all_ops:
            if op.type in (Op.Const, Op.Identity, Op.Placeholder):
                continue

            attrs = op.attrs.copy()
            if op.type in (Op.Conv2D, Op.Conv2DBias, Op.DepthwiseConv2DBias):
                kshape = op.inputs[1].shape
                attrs["kshape"] = [kshape[0], kshape[1]]
            attrs["type"] = op.type.name
            attrs.pop("use_cudnn_on_gpu", None)
            custom_options = attrs.pop("custom_options", None)
            if attrs not in unique_ops:
                unique_ops.append(attrs)
                # print attributes in human readable format
                a = attrs.copy()
                if custom_options is not None:
                    a["custom_options"] = custom_options
                s = a.pop("type")
                data_format = a.pop("data_format", None)
                if data_format and data_format != b"NHWC":
                    s += " " + str(data_format)
                t = a.pop("T", None)
                if t:
                    s += " " + str(t)[9:-2]
                srct = a.pop("SrcT", None)
                if srct:
                    s += " " + str(srct)[9:-2]
                dstt = a.pop("DstT", None)
                if dstt:
                    s += "->" + str(dstt)[9:-2]
                print(s + " " + str(a))

    def print_graph(self, label=None):
        if label:
            print(f"\n[ {label} ]")
        print("print_graph()", self.name)
        all_ops = self.get_all_ops()
        for idx, op in enumerate(all_ops):
            print(idx, op.type, op.name)

    def print_graph_with_tensors(self):
        print("print_graph_with_tensors()", self.name)
        all_ops = self.get_all_ops()
        for idx, op in enumerate(all_ops):
            print(idx, op.type, op.name)
            for idx, tens in enumerate(op.inputs):
                if tens:
                    print(
                        f"   Input  {idx:02d}"
                        f" {tens.purpose.name:>20} {tens.mem_area.name:>20} {tens.mem_type.name:>20} {tens}"
                    )
                else:
                    print(f"   Input  {idx:02d} {'-':>20} {'-':>20} {'-':>20} {tens}")
            for idx, tens in enumerate(op.outputs):
                print(
                    f"   Output {idx:02d}"
                    f" {tens.purpose.name:>20} {tens.mem_area.name:>20} {tens.mem_type.name:>20} {tens}"
                )
            print()

    def print_graph_with_tensor_quantization(self):
        print("print_graph_with_tensor_quantization()", self.name)
        all_ops = self.get_all_ops()
        for idx, op in enumerate(all_ops):
            print(idx, op.type, op.name)
            for idx, tens in enumerate(op.inputs):
                if tens:
                    q = tens.quantization
                    if q is None:
                        print(f"   Input  {idx:02d} {tens.dtype!s:>10} NO QUANTIZATION INFO {tens.name}")
                    else:
                        print(
                            f"   Input  {idx:02d} {tens.dtype!s:>10}"
                            f" min={q.min} max={q.max} scale={q.scale_f32!s} zero_point={q.zero_point} {tens.name}"
                        )
                else:
                    print(f"   Input  {idx:02d} {'-':>10} {tens}")
            for idx, tens in enumerate(op.outputs):
                q = tens.quantization
                if q is None:
                    print(f"   Output {idx:02d} {tens.dtype!s:>10} NO QUANTIZATION INFO {tens.name}")
                else:
                    print(
                        f"   Output {idx:02d} {tens.dtype!s:>10}"
                        f" min={q.min} max={q.max} scale={q.scale_f32!s} zero_point={q.zero_point} {tens.name}"
                    )
            print()

    def print_passes(self):
        print("print_passes()", self.name)
        for idx, ps in enumerate(self.passes):
            print("%03d %s" % (idx * 2, ps))

    def print_passes_with_tensors(self):
        print("print_passes_with_tensors()", self.name)
        for idx, ps in enumerate(self.passes):
            print("%3d %s" % (idx * 2, ps))
            for idx, tens in enumerate(ps.inputs):
                print(
                    "    Input        %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            for idx, tens in enumerate(ps.intermediates):
                print(
                    "    Intermediate %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            for idx, tens in enumerate(ps.outputs):
                print(
                    "    Output       %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            print()

    def print_cascaded_passes(self):
        print("print_cascaded_passes()", self.name)
        for idx, ps in enumerate(self.cascaded_passes):
            print("%3d %s SRAM used %.1f KB" % (idx * 2, ps, ps.sram_used / 1024))

    def print_cascaded_passes_with_tensors(self):
        print("print_cascaded_passes_with_tensors()", self.name)
        for idx, ps in enumerate(self.cascaded_passes):
            print("%3d %s SRAM used %.1f KB" % (idx * 2, ps, ps.sram_used / 1024))
            for idx, tens in enumerate(ps.inputs):
                print(
                    "    Input        %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            for idx, tens in enumerate(ps.intermediates):
                print(
                    "    Intermediate %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            for idx, tens in enumerate(ps.outputs):
                print(
                    "    Output       %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            print()

    def print_cascaded_passes_with_tensor_sizes(self):
        print("print_cascaded_passes_with_tensor_sizes()", self.name)
        for idx, ps in enumerate(self.cascaded_passes):
            print("%3d %s SRAM used %.1f KB" % (idx * 2, ps, ps.sram_used / 1024))
            for idx, tens in enumerate(ps.inputs):
                print(
                    "    Input        %2d %7.1f KB %-24s %-15s %-15s %-20s %s"
                    % (
                        idx,
                        tens.storage_size() / 1024,
                        tens.storage_shape,
                        tens.mem_area.name,
                        tens.purpose.name,
                        tens.format.name,
                        tens.name,
                    )
                )
            for idx, tens in enumerate(ps.intermediates):
                print(
                    "    Intermediate %2d %7.1f KB %-24s %-15s %-15s %-20s %s"
                    % (
                        idx,
                        tens.storage_size() / 1024,
                        tens.storage_shape,
                        tens.mem_area.name,
                        tens.purpose.name,
                        tens.format.name,
                        tens.name,
                    )
                )
            for idx, tens in enumerate(ps.outputs):
                print(
                    "    Output       %2d %7.1f KB %-24s %-15s %-15s %-20s %s"
                    % (
                        idx,
                        tens.storage_size() / 1024,
                        tens.storage_shape,
                        tens.mem_area.name,
                        tens.purpose.name,
                        tens.format.name,
                        tens.name,
                    )
                )
            print()

    def print_high_level_command_stream(self):
        print("print_high_level_command_stream()", self.name)
        for idx, cmd in enumerate(self.high_level_command_stream):
            print("%3d %s" % (idx, cmd))


class Graph:
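    """A full neural network graph with one or more Subgraphs."""
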
    def __init__(self, name="<unnamed>", batch_size=1):
        self.name = name
        self.batch_size = batch_size
        self.subgraphs = []
        self.metadata = []
        self.memory_used = {}
        self.total_original_weights = 0
        self.total_npu_encoded_weights = 0
        self.weight_cache = None  # See CompressedWeightCache
        self.bandwidths = 0
        self.macs = 0
        self.cycles = 0

    def get_root_subgraph(self):
        return self.subgraphs[0]

    def prune_startup_init_pass(self):
        for sg in self.subgraphs:
            sg.prune_startup_init_pass()

    def update_consumers(self):
        for sg in self.subgraphs:
            sg.update_consumers()

    def refresh_after_modification(self):
        for sg in self.subgraphs:
            sg.refresh_after_modification()

    def print_operators(self):
        for sg in self.subgraphs:
            sg.print_operators()

    def print_graph(self, label=None):
        for sg in self.subgraphs:
            sg.print_graph(label)

    def print_graph_with_tensors(self):
        for sg in self.subgraphs:
            sg.print_graph_with_tensors()

    def print_graph_with_tensor_quantization(self):
        for sg in self.subgraphs:
            sg.print_graph_with_tensor_quantization()

    def print_passes(self):
        for sg in self.subgraphs:
            sg.print_passes()

    def print_passes_with_tensors(self):
        for sg in self.subgraphs:
            sg.print_passes_with_tensors()

    def print_cascaded_passes(self):
        for sg in self.subgraphs:
            sg.print_cascaded_passes()

    def print_cascaded_passes_with_tensors(self):
        for sg in self.subgraphs:
            sg.print_cascaded_passes_with_tensors()

    def print_cascaded_passes_with_tensor_sizes(self):
        for sg in self.subgraphs:
            sg.print_cascaded_passes_with_tensor_sizes()

    def print_high_level_command_stream(self):
        for sg in self.subgraphs:
            sg.print_high_level_command_stream()
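

# A minimal debugging sketch (hypothetical driver code; in the compiler the
# Graph is normally built by the TFLite/TOSA readers, not by hand):
#
#     nng = Graph("my_net", batch_size=1)
#     sg = Subgraph("main", PassPlacement.Npu)
#     nng.subgraphs.append(sg)
#     ...populate sg.passes / sg.output_tensors...
#     nng.refresh_after_modification()   # rebuild tensor consumer lists
#     nng.print_passes()                 # dump passes for inspection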