# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Description:
# Contains the main sequencing of the compiler.

import time

from . import graph_optimiser
from . import mark_tensors
from . import insert_dma
from . import pass_packing
from . import scheduler
from . import tensor_allocation
from . import npu_performance
from . import high_level_command_stream_generator
from . import register_command_stream_generator
from . import extract_npu_subgraphs
from . import npu_serialisation
from . import weight_compressor
from . import live_range
from .tensor import MemArea
from .nn_graph import TensorAllocator, PassPlacement
from .rewrite_graph import verify_graph_health


class CompilerOptions:
    """Set of options to change compiler behaviour - verbosity, targets, turning off passes.

    Note the difference between ArchitectureFeatures and CompilerOptions:
    - ArchitectureFeatures is for changing the Ethos-U55 and system architecture
    - CompilerOptions is for changing the behaviour of the compiler
    """

    def __init__(
        self,
        verbose_graph=False,
        verbose_quantization=False,
        verbose_packing=False,
        verbose_tensor_purpose=False,
        verbose_tensor_format=False,
        verbose_allocation=False,
        verbose_high_level_command_stream=False,
        verbose_register_command_stream=False,
        verbose_operators=False,
        show_minimum_possible_allocation=False,
        show_cpu_operations=False,
        tensor_allocator=TensorAllocator.Greedy,
        timing=False,
        output_dir="outputs",
    ):
        self.verbose_graph = verbose_graph
        self.verbose_quantization = verbose_quantization
        self.verbose_packing = verbose_packing
        self.verbose_tensor_purpose = verbose_tensor_purpose
        self.verbose_tensor_format = verbose_tensor_format
        self.verbose_allocation = verbose_allocation
        self.verbose_high_level_command_stream = verbose_high_level_command_stream
        self.verbose_register_command_stream = verbose_register_command_stream
        self.verbose_operators = verbose_operators
        self.show_minimum_possible_allocation = show_minimum_possible_allocation
        self.show_cpu_operations = show_cpu_operations
        self.tensor_allocator = tensor_allocator
        self.timing = timing
        self.output_dir = output_dir

    def __str__(self):
        return type(self).__name__ + ": " + str(self.__dict__)

    __repr__ = __str__


def compiler_driver(nng, arch, options, scheduler_options):
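    """Sequence the compiler: optimise and pack the graph, schedule it,
    allocate tensors, generate and serialise the command streams, and
    finally estimate the network's performance."""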
    assert verify_graph_health(nng)
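    # First round of graph optimisation, run before tensor purposes are marked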
    nng = graph_optimiser.optimise_graph_a(nng, arch, options.verbose_graph)
    assert verify_graph_health(nng)

    if options.verbose_quantization:
        nng.print_graph_with_tensor_quantization()

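    # Second round of graph optimisation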
    nng = graph_optimiser.optimise_graph_b(nng, arch, options.verbose_graph)
    assert verify_graph_health(nng)

    nng = mark_tensors.mark_tensor_purpose(nng, arch, options.verbose_tensor_purpose)
    assert verify_graph_health(nng)
    nng = insert_dma.insert_dma_commands(nng, arch, options.verbose_graph)
    assert verify_graph_health(nng)
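    # Pack operations into fused passes, the units that the scheduler works on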
    pass_packing.pack_into_passes(nng, arch, options.verbose_packing)
    assert verify_graph_health(nng)

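    # Pull the Npu-placed passes out into their own subgraphs, leaving call ops behind in the Cpu graph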
    extract_npu_subgraphs.extract_npu_subgraphs(nng, arch)

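    # Choose a storage format for each tensor, e.g. linear or block-based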
    mark_tensors.mark_tensor_format(nng, arch, options.verbose_tensor_format)
    assert verify_graph_health(nng)
    if options.timing:
        start = time.time()

    # Run the scheduler
    scheduler.schedule_passes(nng, arch, scheduler_options)

    if options.timing:
        stop = time.time()
        print("Scheduling took %f s" % (stop - start))
        start = time.time()

    # Update the compressed weights now that we have determined the
    # block config, and calculate and pack the scales and biases
    weight_compressor.update_pass_weight_and_scale_tensors(nng, arch)

    # Memory area for all non-constant tensors (Cpu and Npu)
    non_const_mem_area = MemArea.Sram

    # LiveRanges for constant tensors for all Npu subgraphs
    permanent_storage = arch.permanent_storage_mem_area
    lr_graph_flash = live_range.LiveRangeGraph()

    # Placeholders for scratch and flash tensors that are common for all Npu subgraphs
    scratch_tens = None
    flash_tens = None

    # Calculate live ranges for all constant Npu tensors, in permanent storage
    for sg in nng.subgraphs:
        if sg.placement == PassPlacement.Npu:
            lr_graph_flash = live_range.extract_live_ranges_from_cascaded_passes(
                sg, permanent_storage, ignore_subgraph_input_output_tensors=True, lr_graph=lr_graph_flash
            )

    assert len(nng.subgraphs) > 1, "Error: No operators can be hardware accelerated; cancelling compilation"

    # Allocate all Npu constant tensors to the first Npu subgraph since it is
    # processed first during serialization into tensors
    first_npu_sg = nng.subgraphs[1]
    assert first_npu_sg.placement == PassPlacement.Npu
    tensor_allocation.allocate_tensors(
        nng,
        first_npu_sg,
        arch,
        permanent_storage,
        scheduler_options.use_ifm_ofm_overlap,
        options.tensor_allocator,
        options.verbose_allocation,
        options.show_minimum_possible_allocation,
        lr_graph_flash,
    )

    # Allocate all non-constant tensors to the root, i.e. Cpu, subgraph. This step
    # will start at the root subgraph's input and traverse from top to bottom. When
    # it comes across an Npu-op it will extract live ranges for its corresponding
    # Npu subgraph and add them to the root's live range graph. Finally, all of the
    # non-constant tensors are allocated together
    root_sg = nng.get_root_subgraph()
    tensor_allocation.allocate_tensors(
        nng,
        root_sg,
        arch,
        non_const_mem_area,
        scheduler_options.use_ifm_ofm_overlap,
        options.tensor_allocator,
        options.verbose_allocation,
        options.show_minimum_possible_allocation,
    )

    # Generate command streams and serialise Npu-ops into tensors
    for sg in nng.subgraphs:
        high_level_command_stream_generator.generate_high_level_command_stream(
            nng, sg, arch, options.verbose_high_level_command_stream
        )
        register_command_stream_generator.generate_register_command_stream(
            nng, sg, arch, options.verbose_register_command_stream
        )
        scratch_tens, flash_tens = npu_serialisation.serialise_npu_subgraph_into_tensors(
            nng, sg, arch, scratch_tens, flash_tens
        )

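    # Rewrite the Npu call ops in the root (Cpu) subgraph now that the Npu subgraphs have been serialised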
    npu_serialisation.rewrite_npu_call_ops(nng, root_sg, arch)

    # Allocate all Cpu constant tensors; this is done last because the Npu-ops
    # have to be serialized into flash and scratch tensors first
    tensor_allocation.allocate_tensors(
        nng,
        root_sg,
        arch,
        permanent_storage,
        scheduler_options.use_ifm_ofm_overlap,
        options.tensor_allocator,
        options.verbose_allocation,
        options.show_minimum_possible_allocation,
    )

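    # Estimate the performance of the scheduled and allocated network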
    npu_performance.calc_performance_for_network(nng, arch)