Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 1 | # Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved. |
| 2 | # |
| 3 | # SPDX-License-Identifier: Apache-2.0 |
| 4 | # |
| 5 | # Licensed under the Apache License, Version 2.0 (the License); you may |
| 6 | # not use this file except in compliance with the License. |
| 7 | # You may obtain a copy of the License at |
| 8 | # |
| 9 | # www.apache.org/licenses/LICENSE-2.0 |
| 10 | # |
| 11 | # Unless required by applicable law or agreed to in writing, software |
| 12 | # distributed under the License is distributed on an AS IS BASIS, WITHOUT |
| 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | # See the License for the specific language governing permissions and |
| 15 | # limitations under the License. |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 16 | # Description: |
| 17 | # Serialises and packs an NPU subgraph into tensors. |
Diego Russo | ea6111a | 2020-04-14 18:41:58 +0100 | [diff] [blame] | 18 | import struct |
| 19 | |
| 20 | import numpy as np |
| 21 | |
| 22 | from . import driver_actions |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 23 | from .data_type import DataType |
Diego Russo | e8a1045 | 2020-04-21 17:39:10 +0100 | [diff] [blame] | 24 | from .nn_graph import PassPlacement |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 25 | from .operation import Op |
Diego Russo | e8a1045 | 2020-04-21 17:39:10 +0100 | [diff] [blame] | 26 | from .operation import Operation |
| 27 | from .tensor import MemArea |
Patrik Gustavsson | eca2e95 | 2020-05-27 09:15:11 +0200 | [diff] [blame] | 28 | from .tensor import MemType |
Diego Russo | e8a1045 | 2020-04-21 17:39:10 +0100 | [diff] [blame] | 29 | from .tensor import Tensor |
| 30 | from .tensor import TensorFormat |
| 31 | from .tensor import TensorPurpose |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 32 | |
| 33 | |
def make_memory_tensor(name, mem_area, mem_type, sz, want_values, arch):
    """Create a flat uint8 NHWC tensor of sz bytes used to hold serialised subgraph data.

    The tensor is placed in the given memory area/type and marked as a feature map.
    When want_values is True it is backed by a zero-initialised numpy byte buffer so
    payload data can be copied into it later.
    """
    memory_tensor = Tensor([sz], DataType.uint8, name)
    memory_tensor.mem_area = mem_area
    memory_tensor.mem_type = mem_type
    memory_tensor.purpose = TensorPurpose.FeatureMap
    memory_tensor.set_format(TensorFormat.NHWC, arch)
    if want_values:
        memory_tensor.values = np.zeros(memory_tensor.shape, np.uint8)
    return memory_tensor
| 43 | |
| 44 | |
def copy_compressed_values_to_memory_tensor(memory_tensor, src_tensor):
    """Copy src_tensor's compressed-value chunks into memory_tensor.values.

    Chunks are laid out back to back, starting at src_tensor.address.
    """
    write_pos = src_tensor.address
    for chunk in src_tensor.compressed_values:
        next_pos = write_pos + len(chunk)
        memory_tensor.values[write_pos:next_pos] = chunk
        write_pos = next_pos
| 51 | |
Tim Hall | c30f495 | 2020-06-15 20:47:35 +0100 | [diff] [blame] | 52 | |
def copy_ifm_values_to_memory_tensor(memory_tensor, src_tensor):
    """Copy the constant contents of src_tensor into memory_tensor.values as raw bytes.

    Quantised values are preferred over float values when present. Elements wider
    than one byte are reinterpreted as a plain uint8 stream before copying.
    Writing starts at src_tensor.address.
    """
    if src_tensor.quant_values is not None:
        flat_values = src_tensor.quant_values.flatten()
    else:
        flat_values = src_tensor.values.flatten()
    if src_tensor.dtype.size_in_bytes() > 1:
        # View the multi-byte elements as their underlying byte representation
        flat_values = np.frombuffer(flat_values.tobytes(), dtype=np.uint8)
    dst_start = src_tensor.address
    memory_tensor.values[dst_start : dst_start + flat_values.size] = flat_values
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 60 | |
Tim Hall | c30f495 | 2020-06-15 20:47:35 +0100 | [diff] [blame] | 61 | |
def serialise_npu_subgraph_into_tensors(nng, sg, arch, scratch_tens, scratch_fast_tens, flash_tens):
    """Serialise one NPU subgraph into scratch/scratch-fast/flash/command-stream tensors.

    Builds the register command stream payload (driver actions header + command words),
    copies weight/scale/LUT/constant-IFM data into a flash tensor, and creates or grows
    the shared scratch, scratch_fast and flash tensors that are threaded through
    successive NPU subgraphs via the scratch_tens/scratch_fast_tens/flash_tens
    arguments. Also adjusts nng.total_size / nng.total_elements so Vela-generated
    metadata is excluded from bits-per-element statistics.

    Returns the (possibly newly created) scratch, scratch_fast and flash tensors
    so the caller can pass them to the next subgraph. Non-NPU subgraphs are
    returned unchanged.
    """
    if sg.placement != PassPlacement.Npu:
        return scratch_tens, scratch_fast_tens, flash_tens

    flash_area = arch.permanent_storage_mem_area
    scratch_area = arch.feature_map_storage_mem_area
    scratch_fast_area = arch.fast_storage_mem_area

    flash_size = sg.memory_used.get(flash_area, 0)
    scratch_size = sg.memory_used.get(scratch_area, 0)

    # Prepare driver actions for this command tensor
    da_list = []
    driver_actions.emit_fourcc(da_list, "COP1")
    driver_actions.emit_config(da_list, 0, 1, arch)
    driver_actions.emit_cmd_stream_header(da_list, len(sg.register_command_stream))

    # Append command stream words
    da_list.extend(sg.register_command_stream)

    # Convert to bytes (little-endian 32-bit words)
    payload_bytes = struct.pack("<{0}I".format(len(da_list)), *da_list)

    command_stream_size_bytes = len(payload_bytes)

    # Adjust the bits per element calculation to exclude metadata generated by Vela
    nng.total_size[flash_area] = nng.total_size.get(flash_area, 0) - flash_size - command_stream_size_bytes
    nng.total_elements[flash_area] = nng.total_elements.get(flash_area, 0) - flash_size - command_stream_size_bytes
    nng.total_size[scratch_area] = nng.total_size.get(scratch_area, 0) - scratch_size
    nng.total_elements[scratch_area] = nng.total_elements.get(scratch_area, 0) - scratch_size

    # Make sure the fast-storage area has entries even when it is distinct from scratch
    if scratch_area != scratch_fast_area:
        nng.total_size[scratch_fast_area] = nng.total_size.get(scratch_fast_area, 0)
        nng.total_elements[scratch_fast_area] = nng.total_elements.get(scratch_fast_area, 0)

    # NOTE(review): chained comparison — equivalent to
    # "flash_tens == scratch_tens and scratch_tens is None", i.e. both are None
    # on the first NPU subgraph. Verify Tensor does not override __eq__ in a way
    # that would change this.
    if flash_tens == scratch_tens is None:
        # First Npu subgraph, create scratch and flash tensors
        sg.scratch_tensor = make_memory_tensor(
            sg.name + "_scratch", scratch_area, MemType.Scratch, scratch_size, False, arch
        )
        sg.scratch_tensor.purpose = TensorPurpose.Scratch
        sg.flash_tensor = make_memory_tensor(
            sg.name + "_flash", flash_area, MemType.Permanent_CPU, flash_size, True, arch
        )
        sg.scratch_fast_tensor = make_memory_tensor(
            sg.name + "_scratch_fast", scratch_fast_area, MemType.Scratch_fast, 0, False, arch
        )
        sg.scratch_fast_tensor.purpose = TensorPurpose.Scratch
    else:
        # Subsequent subgraphs share the tensors; grow scratch/flash to fit this one
        sg.scratch_tensor = scratch_tens
        sg.scratch_tensor.shape[0] += scratch_size
        sg.flash_tensor = flash_tens
        sg.flash_tensor.shape[0] += flash_size

        sg.scratch_fast_tensor = scratch_fast_tens
        sg.scratch_fast_tensor.shape[0] = 0

    # Copy all constant data (weights, scales, LUTs, constant IFMs) into the flash tensor
    for cps in sg.cascaded_passes:
        for ps in cps.passes:
            if ps.placement == PassPlacement.Npu:
                if ps.weight_tensor is not None:
                    # For DMA ops, ps.weight_tensor is referring to the SRAM weight tensor and therefore the address
                    # is pointing at the destination address of where the weights should be placed in SRAM.
                    # This ensures that the Flash weight tensor is used instead and thus gets the correct address.
                    if ps.weight_tensor.ops[0].type == Op.DMA:
                        copy_compressed_values_to_memory_tensor(sg.flash_tensor, ps.weight_tensor.ops[0].inputs[0])
                    else:
                        copy_compressed_values_to_memory_tensor(sg.flash_tensor, ps.weight_tensor)

                    # NOTE(review): assumes a pass with weights always has a scale tensor — confirm
                    copy_compressed_values_to_memory_tensor(sg.flash_tensor, ps.scale_tensor)

                if ps.lut_tensor is not None:
                    copy_ifm_values_to_memory_tensor(sg.flash_tensor, ps.lut_tensor)
                # Constant (non-scratch) IFMs live in flash and are copied there too
                if ps.ifm_tensor is not None and ps.ifm_tensor.mem_type not in (MemType.Scratch, MemType.Scratch_fast):
                    copy_ifm_values_to_memory_tensor(sg.flash_tensor, ps.ifm_tensor)
                if ps.ifm2_tensor is not None and (
                    ps.ifm2_tensor.mem_type not in (MemType.Scratch, MemType.Scratch_fast)
                ):
                    copy_ifm_values_to_memory_tensor(sg.flash_tensor, ps.ifm2_tensor)

    # The command stream gets its own permanent tensor, filled with the packed payload
    sg.command_stream_tensor = make_memory_tensor(
        sg.name + "_command_stream", flash_area, MemType.Permanent_CPU, command_stream_size_bytes, True, arch
    )
    sg.command_stream_tensor.values = np.frombuffer(payload_bytes, dtype=np.uint8)

    return sg.scratch_tensor, sg.scratch_fast_tensor, sg.flash_tensor
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 148 | |
| 149 | |
def add_const_tens_to_startup_cascaded_pass(startup_cps, tens):
    """Prepend a Const op producing tens to the first pass of the startup cascaded pass,
    registering tens as an output of both that pass and the cascaded pass."""
    const_op = Operation(Op.Const, tens.name + "_const")
    const_op.set_output_tensor(tens)
    first_pass = startup_cps.passes[0]
    first_pass.ops.insert(0, const_op)
    first_pass.outputs.insert(0, tens)
    startup_cps.outputs.insert(0, tens)
| 156 | |
| 157 | |
def rewrite_npu_call_ops(nng, sg, arch):
    """Wire the serialised tensors of each CustomNpuOp's callee subgraph into the CPU graph.

    For every CustomNpuOp in a CPU subgraph, the callee's command-stream, flash,
    scratch and scratch_fast tensors are added as inputs of the op, its pass and its
    cascaded pass. Constant tensors (everything except the scratch ones) are also
    emitted by the startup cascaded pass. SRAM accounting is updated accordingly.
    """
    if sg.placement != PassPlacement.Cpu:
        return

    startup_cps = sg.cascaded_passes[0]

    for cps_idx, cps in enumerate(sg.cascaded_passes):
        for ps in cps.passes:
            for op in ps.ops:
                if op.type != Op.CustomNpuOp:
                    continue
                callee = op.attrs["subgraph"]

                # Inserting at index 0 each time leaves the final input order as:
                # command_stream, flash, scratch, scratch_fast, <original inputs>
                serialised_tensors = (
                    callee.scratch_fast_tensor,
                    callee.scratch_tensor,
                    callee.flash_tensor,
                    callee.command_stream_tensor,
                )
                extra_size = 0
                for tens in serialised_tensors:
                    op.inputs.insert(0, tens)
                    ps.inputs.insert(0, tens)
                    cps.inputs.insert(0, tens)
                    # Scratch tensors are working memory, not constants to emit at startup
                    if tens != callee.scratch_tensor and tens != callee.scratch_fast_tensor:
                        add_const_tens_to_startup_cascaded_pass(startup_cps, tens)
                    extra_size += tens.storage_size()

                # The serialised tensors are live from the start of the graph up to this op
                for prev_cps in sg.cascaded_passes[: cps_idx + 1]:
                    prev_cps.sram_used += extra_size

                if callee.scratch_tensor is not None:
                    if callee.scratch_tensor.mem_area == MemArea.Sram:
                        cps.sram_used += callee.scratch_tensor.storage_size()

                if callee.scratch_fast_tensor is not None:
                    if callee.scratch_fast_tensor.mem_area == MemArea.Sram:
                        cps.sram_used += callee.scratch_fast_tensor.storage_size()