wilisa01 | 89a8cdd | 2022-08-22 16:13:06 +0000 | [diff] [blame] | 1 | # Copyright (C) 2020-2022 Arm Limited or its affiliates. All rights reserved. |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 2 | # |
| 3 | # SPDX-License-Identifier: Apache-2.0 |
| 4 | # |
| 5 | # Licensed under the Apache License, Version 2.0 (the License); you may |
| 6 | # not use this file except in compliance with the License. |
| 7 | # You may obtain a copy of the License at |
| 8 | # |
| 9 | # www.apache.org/licenses/LICENSE-2.0 |
| 10 | # |
| 11 | # Unless required by applicable law or agreed to in writing, software |
| 12 | # distributed under the License is distributed on an AS IS BASIS, WITHOUT |
| 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | # See the License for the specific language governing permissions and |
| 15 | # limitations under the License. |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 16 | # Description: |
| 17 | # Contains the main sequencing of the compiler. |
Diego Russo | ea6111a | 2020-04-14 18:41:58 +0100 | [diff] [blame] | 18 | import time |
| 19 | |
Diego Russo | e8a1045 | 2020-04-21 17:39:10 +0100 | [diff] [blame] | 20 | from . import extract_npu_subgraphs |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 21 | from . import graph_optimiser |
Diego Russo | e8a1045 | 2020-04-21 17:39:10 +0100 | [diff] [blame] | 22 | from . import high_level_command_stream_generator |
Louis Verhaard | 1e17018 | 2020-11-26 11:42:04 +0100 | [diff] [blame] | 23 | from . import high_level_command_to_npu_op |
Diego Russo | e8a1045 | 2020-04-21 17:39:10 +0100 | [diff] [blame] | 24 | from . import live_range |
Louis Verhaard | 0b8268a | 2020-08-05 16:11:29 +0200 | [diff] [blame] | 25 | from . import lut |
Diego Russo | e8a1045 | 2020-04-21 17:39:10 +0100 | [diff] [blame] | 26 | from . import mark_tensors |
| 27 | from . import npu_performance |
| 28 | from . import npu_serialisation |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 29 | from . import pass_packing |
| 30 | from . import scheduler |
| 31 | from . import tensor_allocation |
Tim Hall | e6ccd87 | 2020-11-09 16:46:37 +0000 | [diff] [blame] | 32 | from .debug_database import DebugDatabase |
Diego Russo | e8a1045 | 2020-04-21 17:39:10 +0100 | [diff] [blame] | 33 | from .nn_graph import PassPlacement |
| 34 | from .nn_graph import TensorAllocator |
Tim Hall | e6ccd87 | 2020-11-09 16:46:37 +0000 | [diff] [blame] | 35 | from .operation import Op |
Diego Russo | ea6111a | 2020-04-14 18:41:58 +0100 | [diff] [blame] | 36 | from .rewrite_graph import verify_graph_health |
Tim Hall | e6ccd87 | 2020-11-09 16:46:37 +0000 | [diff] [blame] | 37 | from .rewrite_graph import visit_graph_post_order |
Tim Hall | d8339a7 | 2021-05-27 18:49:40 +0100 | [diff] [blame] | 38 | from .scheduler import OptimizationStrategy |
| 39 | from .tensor import MemArea |
Patrik Gustavsson | eca2e95 | 2020-05-27 09:15:11 +0200 | [diff] [blame] | 40 | from .tensor import MemType |
Jacob Bohlin | 0628a8c | 2020-08-28 13:25:14 +0200 | [diff] [blame] | 41 | from .tensor import Tensor |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 42 | |
| 43 | |
class CompilerOptions:
    """Set of options to change compiler behaviour - verbosity, targets, turning off passes.

    Note the difference between ArchitectureFeatures and CompilerOptions
    - ArchitectureFeatures is for changing the Ethos-U and system architecture
    - CompilerOptions is for changing the behaviour of the compiler"""

    def __init__(
        self,
        verbose_graph=False,
        verbose_quantization=False,
        verbose_packing=False,
        verbose_tensor_purpose=False,
        verbose_tensor_format=False,
        verbose_allocation=False,
        verbose_high_level_command_stream=False,
        verbose_register_command_stream=False,
        verbose_operators=False,
        verbose_weights=False,
        verbose_performance=False,
        show_cpu_operations=False,
        tensor_allocator=TensorAllocator.Greedy,
        timing=False,
        output_dir="outputs",
        cpu_tensor_alignment=Tensor.AllocationQuantum,
        hillclimb_max_iterations=None,
    ):
        # Every keyword argument becomes an identically-named attribute.
        # locals() preserves declaration order, so __str__/__repr__ list the
        # options in the same order as explicit per-attribute assignments would.
        opts = dict(locals())
        del opts["self"]
        self.__dict__.update(opts)

    def __repr__(self):
        return type(self).__name__ + ": " + str(self.__dict__)

    __str__ = __repr__
| 94 | |
| 95 | |
def next_sram_factor(alloc_results):
    """Bisect toward the largest SRAM factor the tensor allocator can fit.

    alloc_results is the list of success/failure booleans from previous
    allocation attempts. Returns a (factor, dry_test) tuple where factor is
    None (stop searching) or the next SRAM factor to try (0 <= factor <= 1),
    and dry_test is True while the bisection is still in progress.
    """
    hi = 1.0
    lo = 0.7
    MAX_ITERATIONS = 8
    attempts = len(alloc_results)
    if attempts == 0:
        # First attempt: try the full SRAM budget and keep it if it fits
        return (hi, False)
    if attempts == 1:
        if alloc_results[0]:
            # Full budget fitted straight away - nothing to bisect
            return (None, False)
        # Full budget failed - start bisecting from the lower bound
        return (lo, True)
    if attempts > MAX_ITERATIONS:
        # Safety stop
        return (None, False)
    if not alloc_results[1]:
        # The lower bound also failed; search the interval [0, lo] instead
        hi = lo
        lo = 0
    best = lo
    for fitted in alloc_results[2:]:
        mid = (lo + hi) / 2
        if fitted:
            best = max(best, mid)
            lo = mid
        else:
            hi = mid
    if attempts == MAX_ITERATIONS:
        # Bisection budget exhausted: redo the best known factor for real
        return (best, False)
    # Keep bisecting; run the next probe as a dry test only
    return ((lo + hi) / 2, True)
| 133 | |
| 134 | |
def _record_operator(op, arch):
    """Track a pre-optimisation operator in the debug database (constants excluded)."""
    if op.type == Op.Const:
        return
    DebugDatabase.add_source(op)
| 138 | |
| 139 | |
def _check_schedule(nng, arch, scheduler_options):
    """Warn when the scheduled SRAM footprint exceeds the optimisation target."""
    # Only meaningful when optimising for performance against an SRAM budget
    if scheduler_options.optimization_strategy != OptimizationStrategy.Performance:
        return
    sram_usage = nng.get_root_subgraph().memory_used.get(MemArea.Sram)
    if sram_usage is None:
        return
    limit = scheduler_options.optimization_sram_limit
    if sram_usage > limit:
        print(
            "Warning: SRAM target for arena memory area exceeded."
            f" Target = {limit} Bytes,"
            f" Actual = {sram_usage} Bytes"
        )
| 150 | |
| 151 | |
def compiler_driver(nng, arch, options, scheduler_options, network_type, output_basename):
    """Run the main compiler pass sequence on a network graph.

    Optimises the graph, packs it into passes, extracts NPU subgraphs,
    schedules them, allocates tensors, serialises the NPU command streams,
    and finally estimates performance.

    Args:
        nng: the neural network graph to compile; mutated (and rebound by the
            optimisation passes) in place.
        arch: architecture description; provides permanent_storage_mem_area.
        options: CompilerOptions controlling verbosity, timing and allocation.
        scheduler_options: options forwarded to the scheduler.
        network_type: forwarded to the graph optimiser and performance pass
            (presumably the input framework type - TFLite/TOSA; confirm).
        output_basename: base output path used by the performance pass.
    """
    # Graph health is re-verified after every graph-mutating stage below
    assert verify_graph_health(nng)

    # Pre-optimisation operator tracking
    for sg in nng.subgraphs:
        visit_graph_post_order(sg.output_tensors, arch, [], [_record_operator])

    nng = graph_optimiser.optimise_graph(nng, arch, network_type, options.verbose_graph)
    assert verify_graph_health(nng)

    if options.verbose_quantization:
        nng.print_graph_with_tensor_quantization()

    nng = mark_tensors.mark_tensor_purpose(nng, arch, options.verbose_tensor_purpose)
    assert verify_graph_health(nng)
    pass_packing.pack_into_passes(nng, arch, options.verbose_packing)
    assert verify_graph_health(nng)

    # Split NPU-capable passes out into their own subgraphs
    extract_npu_subgraphs.extract_npu_subgraphs(nng, arch)

    assert verify_graph_health(nng)
    if options.timing:
        start = time.time()

    # Run the scheduler
    scheduler.schedule_passes(nng, arch, options, scheduler_options)
    # Warn if the schedule blew past the SRAM optimisation target
    _check_schedule(nng, arch, scheduler_options)

    if options.timing:
        stop = time.time()
        print("Scheduling took %f s" % (stop - start))
        start = time.time()

    # LiveRanges for constant tensors for all Npu subgraphs
    permanent_storage = arch.permanent_storage_mem_area
    lr_graph_flash = live_range.LiveRangeGraph()

    # Placeholders for scratch and flash tensors that are common for all Npu subgraphs
    scratch_tens = None
    scratch_fast_tens = None
    flash_tens = None

    # Create list of NPU subgraphs with same order as the list of all subgraphs
    npu_subgraphs = [sg for sg in nng.subgraphs if sg.placement == PassPlacement.Npu]

    # Calculate live ranges for all constant Npu tensors, in permanent storage
    for sg in npu_subgraphs:
        lr_graph_flash = live_range.create_linear_live_range_graph(
            sg,
            permanent_storage,
            MemType.Permanent_NPU,
            lr_graph=lr_graph_flash,
        )

    if npu_subgraphs:
        # Allocate all Npu constant tensors to the first Npu subgraph since it is
        # processed first during serialization into tensors
        first_npu_sg = npu_subgraphs[0]
        tensor_allocation.allocate_tensors(
            nng,
            first_npu_sg,
            arch,
            permanent_storage,
            set((MemType.Permanent_NPU,)),
            tensor_allocator=TensorAllocator.LinearAlloc,
            verbose_allocation=options.verbose_allocation,
            lr_graph=lr_graph_flash,
        )

    root_sg = nng.get_root_subgraph()

    # Generate command streams and serialise Npu-ops into tensors
    for sg in npu_subgraphs:
        high_level_command_stream_generator.generate_high_level_command_stream_for_schedule(
            nng, sg, arch, options.verbose_high_level_command_stream
        )
        # Fold LUT handling into the high-level command stream before lowering
        lut.optimize_high_level_cmd_stream(sg, arch)
        high_level_command_to_npu_op.generate_register_command_stream_for_sg(
            nng, sg, arch, options.verbose_register_command_stream
        )
        # scratch/scratch_fast/flash tensors are shared across all NPU subgraphs,
        # so they are threaded through each serialisation call
        scratch_tens, scratch_fast_tens, flash_tens = npu_serialisation.serialise_npu_subgraph_into_tensors(
            sg, arch, scratch_tens, scratch_fast_tens, flash_tens
        )

    # Create list of CPU subgraphs with same order as the list of all subgraphs
    cpu_subgraphs = [sg for sg in nng.subgraphs if sg.placement == PassPlacement.Cpu]
    for sg in cpu_subgraphs:
        npu_serialisation.rewrite_npu_call_ops(sg, arch)

    # Set Scratch and Fast_scratch Tensor size
    if scratch_tens is not None:
        scratch_tens.set_all_shapes([root_sg.memory_used_per_type.get(MemType.Scratch, 0)])
    if scratch_fast_tens is not None:
        scratch_fast_tens.set_all_shapes([root_sg.memory_used_per_type.get(MemType.Scratch_fast, 0)])

    # Allocate all Cpu constant tensors, this is done last because the Npu-ops
    # have to be serialized into flash and scratch tensors first
    tensor_allocation.allocate_tensors(
        nng,
        root_sg,
        arch,
        permanent_storage,
        set((MemType.Permanent_CPU,)),
        tensor_allocator=TensorAllocator.LinearAlloc,
        verbose_allocation=options.verbose_allocation,
        cpu_tensor_alignment=options.cpu_tensor_alignment,
    )

    npu_performance.calc_new_performance_for_network(
        nng, arch, network_type, options.verbose_performance, output_basename
    )