# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Neural network graph classes and enums.
# Pass - A packed pass containing one or more Operations.
# CascadedPass - A scheduled pass containing one or more Passes, as well as a scheduling strategy and block
#                configurations.
# Subgraph - Holds a neural network subgraph, pointing at Tensors, Operations, Passes, and CascadedPasses.
# Graph - A full neural network graph with one or more Subgraphs.
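#
# A rough containment sketch (illustrative only; this nesting is inferred from
# the class descriptions above and is not runnable construction code):
#
#   Graph
#     Subgraph          (one or more; subgraphs[0] is the root)
#       CascadedPass    (scheduled; carries a SchedulingStrategy and block configs)
#         Pass          (packed; one or more Operations)
#           Op          (a single operation, see .operation.Op)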
import enum
from typing import List

from .operation import Op
from .shape4d import Shape4D


class PassPlacement(enum.Enum):
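    """Where a Pass executes: on the CPU, on the NPU, as a memory-only pass, or
    in the startup initialisation pass."""
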
    Unknown = 0
    Cpu = 1
    Npu = 2
    MemoryOnly = 3
    StartupInit = 4


class TensorAllocator(enum.Enum):
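    """Tensor allocation algorithm used for a memory area: linear allocation,
    greedy allocation, or hill-climb search."""
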
    LinearAlloc = 1
    Greedy = 2
    HillClimb = 3

    def __str__(self):
        return self.name


class NetworkType(enum.Enum):
    TFLite = 1
    TOSA = 2


class Pass:
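    """A packed pass: one or more Operations executed together as one unit.

    The primary op and its IFM/IFM2/OFM/weight/scale/LUT tensors are cached on
    the pass for quick access.
    """
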
    def __init__(self, name, placement, is_element_wise, npu_block_type):
        self.inputs = []
        self.intermediates = []
        self.outputs = []
        self.ops = []
        self.primary_op = None
        self.ifm_tensor = None
        self.ifm2_tensor = None
        self.ofm_tensor = None
        self.weight_tensor = None
        self.scale_tensor = None
        self.lut_tensor = None
        self.name = name
        self.cascade = None
        self.placement = placement
        self.ifm_shapes: List[Shape4D] = []
        self.ofm_shapes: List[Shape4D] = []

        # TODO: rename is_element_wise because it is not the same as an ElementWise operator. It is used by the tensor
        # allocation and requires that the OFM and IFM have exactly the same address, i.e. complete overlap.
        self.is_element_wise = is_element_wise
        self.npu_block_type = npu_block_type
        self.block_config = None  # will be filled in by scheduler
        self.shared_buffer = None  # will be filled in by scheduler
        self.scheduling_info = None  # will be filled in by scheduler

        self.predecessors = []
        self.successors = []

    def __str__(self):
        return "<nng.Pass '%s', %s, ops=%s>" % (self.name, self.placement, [op.type for op in self.ops])

    __repr__ = __str__

    def get_primary_op_ifm_weights(self):
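        # get_ifm_ifm2_weights_ofm() returns (ifm, ifm2, weights, ofm); [::2] keeps the IFM and weight tensors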
        if not self.primary_op:
            return None, None
        return self.primary_op.get_ifm_ifm2_weights_ofm()[::2]

    def get_primary_op_ifm_ifm2_weights_ofm(self):
        if not self.primary_op:
            return None, None, None, None
        return self.primary_op.get_ifm_ifm2_weights_ofm()

    def get_primary_op_ifm_weights_biases_ofm(self):
        if not self.primary_op:
            return None, None, None, None
        return self.primary_op.get_ifm_weights_biases_ofm()

    def get_primary_op_lut(self):
        if not self.primary_op:
            return None
        return self.primary_op.activation_lut


class SchedulingStrategy(enum.Enum):
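    """Scheduling strategy for a CascadedPass; the names indicate which operand
    the scheduler streams: the IFM or the weights."""
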
    Unknown = -1
    IfmStream = 0
    WeightStream = 1


class SchedulerRewrite(enum.Enum):
    Nop = 0
    ChangeTensorSubPurpose = 1


class CascadedPass:
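    """A scheduled pass: one or more Passes grouped by the scheduler and executed
    with a single SchedulingStrategy and per-pass block configurations.
    """
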
    def __init__(self, name, strat, inputs, intermediates, outputs, passes, placement, is_element_wise):
        self.name = name
        self.strategy = strat
        self.inputs = inputs
        self.intermediates = intermediates
        self.outputs = outputs
        self.passes = passes
        self.placement = placement
        self.is_element_wise = is_element_wise

        self.predecessors = []
        self.successors = []
        self.sram_used = 0
        self.time = 0

    def __str__(self):
        return "<nng.CascadedPass strategy=%s x %s '%s', passes=%s, block_configs=%s>" % (
            self.strategy,
            len(self.passes),
            self.name,
            [ps.name for ps in self.passes],
            [ps.block_config for ps in self.passes],
        )

    __repr__ = __str__


class Subgraph:
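    """A neural network subgraph, pointing at Tensors, Operations, Passes, and
    CascadedPasses.
    """
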
    def __init__(self, name="<unnamed>", placement=PassPlacement.Cpu):
        self.output_tensors = []
        self.input_tensors = []
        self.original_inputs = []  # Preserve the original input order
        self.passes = []
        self.cascaded_passes = []
        self.name = name
        self.high_level_command_stream = []
        self.placement = placement
        self.command_stream_tensor = None
        self.flash_tensor = None
        # Scratch information locally used in the scheduler
        self.schedule = None
        self.sched_ops = []

        self.generated_stream_id = None

        self.memory_used = {}
        self.memory_used_per_type = {}

    def __str__(self):
        return "<nng.Subgraph '%s', n_passes=%d, n_cascaded_passes=%d>" % (
            self.name,
            len(self.passes),
            len(self.cascaded_passes),
        )

    __repr__ = __str__

    def update_consumers(self):
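        """Rebuild every reachable tensor's consumer_list and recompute this
        subgraph's input_tensors from Placeholder/SubgraphInput ops; a None entry
        appended to an output tensor's consumer_list marks the graph itself as a
        consumer of the result."""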
        visit_op_set = set()
        visit_tensor_set = set()
        self.input_tensors = []

        print_visit = False

        def visit_op(op):
            if op in visit_op_set:
                return

            visit_op_set.add(op)
            for inp in op.inputs:
                if not inp:
                    continue
                if print_visit:
                    print(inp, "adding consumer", op)
                visit_tensor(inp)
                inp.consumer_list.append(op)

            if op.type in (Op.Placeholder, Op.SubgraphInput):
                assert len(op.outputs) == 1
                self.input_tensors.append(op.outputs[0])

            for out in op.outputs:
                if out not in visit_tensor_set:
                    out.consumer_list = []  # reset unvisited output, just in case

        def visit_tensor(tens):
            if tens in visit_tensor_set:
                return
            visit_tensor_set.add(tens)
            tens.consumer_list = []
            for op in tens.ops:
                visit_op(op)

        for ps in self.passes:
            for tens in ps.outputs + ps.inputs:
                if not tens:
                    continue
                tens.consumer_list = []  # reset unvisited tensors to start with

        for tens in self.output_tensors:
            visit_tensor(tens)
            tens.consumer_list.append(None)  # special op to indicate that the graph consumes the result

        print_visit = True
        for ps in self.passes:
            for op in ps.ops:
                visit_op(op)
            for tens in ps.inputs:
                visit_tensor(tens)

    def build_pass_links(self):
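        """Assign each pass an even virtual timestamp (2 * index) and rebuild the
        predecessor/successor links between passes from the producers of each
        pass's input tensors."""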
        for idx, ps in enumerate(self.passes):
            ps.time = 2 * idx
            ps.predecessors = []
            ps.successors = []

        for ps in self.passes:
            for tens in ps.inputs:
                for op in tens.ops:
                    pred_pass = op.scheduled_pass
                    assert pred_pass.time < ps.time
                    if ps not in pred_pass.successors:
                        pred_pass.successors.append(ps)

                    if pred_pass not in ps.predecessors:
                        ps.predecessors.append(pred_pass)

                    assert tens in pred_pass.outputs

    def build_pass_dag_predecessors(self):
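        """Populate ps.dag_predecessors with a depth-first walk from the passes
        that have no successors; the NotVisited/BeingVisited/Visited marking drops
        any back-edge that would close a cycle, so the result is a DAG."""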
        for ps in self.passes:
            ps.dag_predecessors = []

        class State(enum.Enum):
            NotVisited = 0
            BeingVisited = 1
            Visited = 2

        pass_visit_dict = {}

        def visit_pass(ps):
            state = pass_visit_dict.get(ps, State.NotVisited)
            if state == State.Visited:
                return True
            elif state == State.BeingVisited:
                return False  # this is a loop, need to remove this link
            elif state == State.NotVisited:
                pass_visit_dict[ps] = State.BeingVisited

            ps.dag_predecessors = []
            for pred in ps.predecessors:
                if visit_pass(pred):
                    ps.dag_predecessors.append(pred)

            pass_visit_dict[ps] = State.Visited
            return True

        for ps in self.passes:
            if not ps.successors:
                visit_pass(ps)

    def build_cascaded_pass_links(self):
        for cps in self.cascaded_passes:
            cps.predecessors = []
            cps.successors = []

        for cps in self.cascaded_passes:
            for tens in cps.inputs:
                for op in tens.ops:
                    pred_cpass = op.scheduled_pass.cascade
                    if cps not in pred_cpass.successors:
                        pred_cpass.successors.append(cps)

                    if pred_cpass not in cps.predecessors:
                        cps.predecessors.append(pred_cpass)

                    assert tens in pred_cpass.outputs

    def refresh_after_modification(self):
        self.update_consumers()

    def prune_startup_init_pass(self):
        assert len(self.passes) >= 1
        ps = self.passes[0]
        assert ps.placement == PassPlacement.StartupInit

        ps.outputs = [out_tens for out_tens in ps.outputs if len(out_tens.consumers()) > 0]
        ps.ops = [op for op in ps.ops if op.outputs[0] in ps.outputs]

    def get_all_ops(self):
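        """Return all operations reachable from output_tensors, in post-order:
        each op appears after the ops that produce its inputs."""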
        all_ops = []
        visit_op_set = set()
        visit_tensor_set = set()

        def visit_op(op):
            if op in visit_op_set:
                return
            visit_op_set.add(op)
            for inp in op.inputs:
                visit_tensor(inp)

            all_ops.append(op)

        def visit_tensor(tens):
            if tens is None or tens in visit_tensor_set:
                return
            visit_tensor_set.add(tens)
            for op in tens.ops:
                visit_op(op)

        for tens in self.output_tensors:
            visit_tensor(tens)

        return all_ops

    def print_operators(self):
        print("print_operators()", self.name)
        all_ops = self.get_all_ops()
        unique_ops = []
        for op in all_ops:
            if op.type in (Op.Const, Op.Identity, Op.Placeholder):
                continue

            attrs = op.attrs.copy()
            if op.type in (Op.Conv2D, Op.Conv2DBias, Op.DepthwiseConv2DBias):
                kshape = op.inputs[1].shape
                attrs["kshape"] = [kshape[0], kshape[1]]
            attrs["type"] = op.type.name
            attrs.pop("use_cudnn_on_gpu", None)
            custom_options = attrs.pop("custom_options", None)
            if attrs not in unique_ops:
                unique_ops.append(attrs)
                # print attributes in human readable format
                a = attrs.copy()
                if custom_options is not None:
                    a["custom_options"] = custom_options
                s = a.pop("type")
                data_format = a.pop("data_format", None)
                if data_format and data_format != b"NHWC":
                    s += " " + str(data_format)
                t = a.pop("T", None)
                if t:
                    s += " " + str(t)[9:-2]
                srct = a.pop("SrcT", None)
                if srct:
                    s += " " + str(srct)[9:-2]
                dstt = a.pop("DstT", None)
                if dstt:
                    s += "->" + str(dstt)[9:-2]
                print(s + " " + str(a))

    def print_graph(self, label=None):
        if label:
            print(f"\n[ {label} ]")
        print("print_graph()", self.name)
        all_ops = self.get_all_ops()
        for idx, op in enumerate(all_ops):
            print(idx, op.type, op.name)

    def print_graph_with_tensors(self):
        print("print_graph_with_tensors()", self.name)
        all_ops = self.get_all_ops()
        for idx, op in enumerate(all_ops):
            print(idx, op.type, op.name)
            for idx, tens in enumerate(op.inputs):
                if tens:
                    print(
                        f"   Input  {idx:02d}"
                        f" {tens.purpose.name:>20} {tens.mem_area.name:>20} {tens.mem_type.name:>20} {tens}"
                    )
                else:
                    print(f"   Input  {idx:02d} {'-':>20} {'-':>20} {'-':>20} {tens}")
            for idx, tens in enumerate(op.outputs):
                print(
                    f"   Output {idx:02d}"
                    f" {tens.purpose.name:>20} {tens.mem_area.name:>20} {tens.mem_type.name:>20} {tens}"
                )
            print()

    def print_graph_with_tensor_quantization(self):
        print("print_graph_with_tensor_quantization()", self.name)
        all_ops = self.get_all_ops()
        for idx, op in enumerate(all_ops):
            print(idx, op.type, op.name)
            for idx, tens in enumerate(op.inputs):
                if tens:
                    q = tens.quantization
                    if q is None:
                        print(f"   Input  {idx:02d} {tens.dtype!s:>10} NO QUANTIZATION INFO {tens.name}")
                    else:
                        print(
                            f"   Input  {idx:02d} {tens.dtype!s:>10}"
                            f" min={q.min} max={q.max} scale={q.scale_f32!s} zero_point={q.zero_point} {tens.name}"
                        )
                else:
                    print(f"   Input  {idx:02d} {'-':>10} {tens}")
            for idx, tens in enumerate(op.outputs):
                q = tens.quantization
                if q is None:
                    print(f"   Output {idx:02d} {tens.dtype!s:>10} NO QUANTIZATION INFO {tens.name}")
                else:
                    print(
                        f"   Output {idx:02d} {tens.dtype!s:>10}"
                        f" min={q.min} max={q.max} scale={q.scale_f32!s} zero_point={q.zero_point} {tens.name}"
                    )
            print()

    def print_passes(self):
        print("print_passes()", self.name)
        for idx, ps in enumerate(self.passes):
            print("%03d %s" % (idx * 2, ps))

    def print_passes_with_tensors(self):
        print("print_passes_with_tensors()", self.name)
        for idx, ps in enumerate(self.passes):
            print("%3d %s" % (idx * 2, ps))
            for idx, tens in enumerate(ps.inputs):
                print(
                    "   Input        %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            for idx, tens in enumerate(ps.intermediates):
                print(
                    "   Intermediate %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            for idx, tens in enumerate(ps.outputs):
                print(
                    "   Output       %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            print()

    def print_cascaded_passes(self):
        print("print_cascaded_passes()", self.name)
        for idx, ps in enumerate(self.cascaded_passes):
            print("%3d %s SRAM used %.1f KB" % (idx * 2, ps, ps.sram_used / 1024))

    def print_cascaded_passes_with_tensors(self):
        print("print_cascaded_passes_with_tensors()", self.name)
        for idx, ps in enumerate(self.cascaded_passes):
            print("%3d %s SRAM used %.1f KB" % (idx * 2, ps, ps.sram_used / 1024))
            for idx, tens in enumerate(ps.inputs):
                print(
                    "   Input        %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            for idx, tens in enumerate(ps.intermediates):
                print(
                    "   Intermediate %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            for idx, tens in enumerate(ps.outputs):
                print(
                    "   Output       %2d %-15s %-15s %-15s %s"
                    % (idx, tens.purpose.name, tens.mem_area.name, tens.format.name, tens.name)
                )
            print()

    def print_cascaded_passes_with_tensor_sizes(self):
        print("print_cascaded_passes_with_tensor_sizes()", self.name)
        for idx, ps in enumerate(self.cascaded_passes):
            print("%3d %s SRAM used %.1f KB" % (idx * 2, ps, ps.sram_used / 1024))
            for idx, tens in enumerate(ps.inputs):
                print(
                    "   Input        %2d %7.1f KB %-24s %-15s %-15s %-20s %s"
                    % (
                        idx,
                        tens.storage_size() / 1024,
                        tens.storage_shape,
                        tens.mem_area.name,
                        tens.purpose.name,
                        tens.format.name,
                        tens.name,
                    )
                )
            for idx, tens in enumerate(ps.intermediates):
                print(
                    "   Intermediate %2d %7.1f KB %-24s %-15s %-15s %-20s %s"
                    % (
                        idx,
                        tens.storage_size() / 1024,
                        tens.storage_shape,
                        tens.mem_area.name,
                        tens.purpose.name,
                        tens.format.name,
                        tens.name,
                    )
                )
            for idx, tens in enumerate(ps.outputs):
                print(
                    "   Output       %2d %7.1f KB %-24s %-15s %-15s %-20s %s"
                    % (
                        idx,
                        tens.storage_size() / 1024,
                        tens.storage_shape,
                        tens.mem_area.name,
                        tens.purpose.name,
                        tens.format.name,
                        tens.name,
                    )
                )
            print()

    def print_high_level_command_stream(self):
        print("print_high_level_command_stream()", self.name)
        for idx, cmd in enumerate(self.high_level_command_stream):
            print("%3d %s" % (idx, cmd))


class Graph:
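    """A full neural network graph with one or more Subgraphs, plus graph-wide
    weight and performance totals filled in during compilation.
    """
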
    def __init__(self, name="<unnamed>", batch_size=1):
        self.name = name
        self.batch_size = batch_size
        self.subgraphs = []
        self.metadata = []
        self.memory_used = {}
        self.total_original_weights = 0
        self.total_npu_weights = 0
        self.total_npu_encoded_weights = 0
        self.weight_cache = None  # See CompressedWeightCache
        self.bandwidths = 0
        self.macs = 0
        self.cycles = 0

    def get_root_subgraph(self):
        return self.subgraphs[0]

    def prune_startup_init_pass(self):
        for sg in self.subgraphs:
            sg.prune_startup_init_pass()

    def update_consumers(self):
        for sg in self.subgraphs:
            sg.update_consumers()

    def refresh_after_modification(self):
        for sg in self.subgraphs:
            sg.refresh_after_modification()

    def print_operators(self):
        for sg in self.subgraphs:
            sg.print_operators()

    def print_graph(self, label=None):
        for sg in self.subgraphs:
            sg.print_graph(label)

    def print_graph_with_tensors(self):
        for sg in self.subgraphs:
            sg.print_graph_with_tensors()

    def print_graph_with_tensor_quantization(self):
        for sg in self.subgraphs:
            sg.print_graph_with_tensor_quantization()

    def print_passes(self):
        for sg in self.subgraphs:
            sg.print_passes()

    def print_passes_with_tensors(self):
        for sg in self.subgraphs:
            sg.print_passes_with_tensors()

    def print_cascaded_passes(self):
        for sg in self.subgraphs:
            sg.print_cascaded_passes()

    def print_cascaded_passes_with_tensors(self):
        for sg in self.subgraphs:
            sg.print_cascaded_passes_with_tensors()

    def print_cascaded_passes_with_tensor_sizes(self):
        for sg in self.subgraphs:
            sg.print_cascaded_passes_with_tensor_sizes()

    def print_high_level_command_stream(self):
        for sg in self.subgraphs:
            sg.print_high_level_command_stream()