# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Serialises and packs an NPU subgraph into tensors.
import struct

import numpy as np

from . import driver_actions
from .data_type import DataType
from .nn_graph import PassPlacement
from .operation import Operation
from .tensor import MemArea
from .tensor import Tensor
from .tensor import TensorFormat
from .tensor import TensorPurpose


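# Creates a 1-D uint8 tensor of the given size in the given memory area. If
# want_values is set, the tensor is backed by zero-initialised values so that
# data can be copied into it later.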
def make_memory_tensor(name, mem_area, sz, want_values, arch):
    tens = Tensor([sz], DataType.uint8, name)
    tens.mem_area = mem_area
    tens.purpose = TensorPurpose.FeatureMap
    tens.set_format(TensorFormat.NHWC, arch)
    if want_values:
        tens.values = np.zeros(tens.shape, np.uint8)
    return tens


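# Copies each chunk of the source tensor's compressed values into the memory
# tensor, starting at the address assigned to the source tensor.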
def copy_compressed_values_to_memory_tensor(memory_tensor, src_tensor):
    start_addr = src_tensor.address
    for compressed_values in src_tensor.compressed_values:
        end_addr = start_addr + len(compressed_values)
        memory_tensor.values[start_addr:end_addr] = compressed_values
        start_addr = end_addr


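# Copies the source tensor's quantised values into the memory tensor at the
# address assigned to the source tensor; used for IFMs that do not live in SRAM.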
def copy_ifm_values_to_memory_tensor(memory_tensor, src_tensor):
    start_addr = src_tensor.address
    end_addr = start_addr + src_tensor.quant_values.size
    memory_tensor.values[start_addr:end_addr] = src_tensor.quant_values


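# Serialises an Npu subgraph into three tensors: a scratch tensor for working
# memory, a flash tensor holding weights, scales and non-SRAM inputs, and a
# command stream tensor holding the driver actions and register command stream.
# The scratch and flash tensors are threaded through successive calls so that
# all Npu subgraphs share them.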
def serialise_npu_subgraph_into_tensors(nng, sg, arch, scratch_tens, flash_tens):
    if sg.placement != PassPlacement.Npu:
        return scratch_tens, flash_tens

    flash_area = arch.permanent_storage_mem_area
    scratch_area = MemArea.Sram

    flash_size = sg.memory_used.get(flash_area, 0)
    scratch_size = sg.memory_used.get(scratch_area, 0)

    # Prepare driver actions for this command tensor
    da_list = []
    driver_actions.emit_fourcc(da_list, "COP1")
    driver_actions.emit_config(da_list, 0, 1, arch)
    driver_actions.emit_cmd_stream_header(da_list, len(sg.register_command_stream))

    # Append command stream words
    da_list.extend(sg.register_command_stream)

    # Convert the 32-bit driver action and command words to little-endian bytes
    payload_bytes = struct.pack("<{0}I".format(len(da_list)), *da_list)

    command_stream_size_bytes = len(payload_bytes)

    # Adjust the bits per element calculation to exclude metadata generated by Vela
    nng.total_size[flash_area] = nng.total_size.get(flash_area, 0) - flash_size - command_stream_size_bytes
    nng.total_elements[flash_area] = nng.total_elements.get(flash_area, 0) - flash_size - command_stream_size_bytes
    nng.total_size[scratch_area] = nng.total_size.get(scratch_area, 0) - scratch_size
    nng.total_elements[scratch_area] = nng.total_elements.get(scratch_area, 0) - scratch_size

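    # The scratch and flash tensors are shared across all Npu subgraphs: the first
    # subgraph creates them and later subgraphs append their storage to the end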
    if flash_tens is None and scratch_tens is None:
        # First Npu subgraph, create scratch and flash tensors
        sg.scratch_tensor = make_memory_tensor(sg.name + "_scratch", scratch_area, scratch_size, False, arch)
        sg.scratch_tensor.purpose = TensorPurpose.Scratch
        sg.flash_tensor = make_memory_tensor(sg.name + "_flash", flash_area, flash_size, True, arch)
    else:
        sg.scratch_tensor = scratch_tens
        sg.scratch_tensor.shape[0] += scratch_size
        sg.flash_tensor = flash_tens
        sg.flash_tensor.shape[0] += flash_size

    for cps in sg.cascaded_passes:
        for ps in cps.passes:
            if ps.placement == PassPlacement.Npu:
                if ps.weight_tensor is not None:
                    # For DMA ops, ps.weight_tensor is referring to the SRAM weight tensor and therefore the address
                    # is pointing at the destination address of where the weights should be placed in SRAM.
                    # This ensures that the Flash weight tensor is used instead and thus gets the correct address.
                    if ps.weight_tensor.ops[0].type == "DMA":
                        copy_compressed_values_to_memory_tensor(sg.flash_tensor, ps.weight_tensor.ops[0].inputs[0])
                    else:
                        copy_compressed_values_to_memory_tensor(sg.flash_tensor, ps.weight_tensor)

                    copy_compressed_values_to_memory_tensor(sg.flash_tensor, ps.scale_tensor)

                if ps.ifm_tensor is not None and ps.ifm_tensor.mem_area != MemArea.Sram:
                    copy_ifm_values_to_memory_tensor(sg.flash_tensor, ps.ifm_tensor)
                if ps.ifm2_tensor is not None and ps.ifm2_tensor.mem_area != MemArea.Sram:
                    copy_ifm_values_to_memory_tensor(sg.flash_tensor, ps.ifm2_tensor)

    sg.command_stream_tensor = make_memory_tensor(
        sg.name + "_command_stream", flash_area, command_stream_size_bytes, True, arch
    )
    sg.command_stream_tensor.values = np.frombuffer(payload_bytes, dtype=np.uint8)

    return sg.scratch_tensor, sg.flash_tensor


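# Wraps a tensor in a Const operation and prepends it to the startup cascaded
# pass, so that it is emitted as a constant of the CPU graph.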
def add_const_tens_to_startup_cascaded_pass(startup_cps, tens):
    op = Operation("Const", tens.name + "_const")
    op.outputs = [tens]
    tens.ops = [op]
    startup_cps.passes[0].ops.insert(0, op)
    startup_cps.passes[0].outputs.insert(0, tens)
    startup_cps.outputs.insert(0, tens)


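# Rewrites each NpuOp in a Cpu subgraph into a custom operation that takes the
# callee's scratch, flash and command stream tensors as extra inputs, updating
# the SRAM usage of the affected cascaded passes to match.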
def rewrite_npu_call_ops(nng, sg, arch):
    if sg.placement != PassPlacement.Cpu:
        return

    startup_cps = sg.cascaded_passes[0]

    for idx, cps in enumerate(sg.cascaded_passes):
        for ps in cps.passes:
            for op in ps.ops:
                if op.type == "NpuOp":
                    callee = op.attrs["subgraph"]
                    op.attrs["custom_options"] = {"type": op.type}

                    sz = 0
                    for tens in [callee.scratch_tensor, callee.flash_tensor, callee.command_stream_tensor]:
                        op.inputs.insert(0, tens)
                        ps.inputs.insert(0, tens)
                        cps.inputs.insert(0, tens)
                        if tens is not callee.scratch_tensor:
                            add_const_tens_to_startup_cascaded_pass(startup_cps, tens)
                        sz += tens.storage_size()

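                    # The tensors added above stay live from the start of inference
                    # until this NpuOp executes, so account for their storage in this
                    # and every earlier cascaded pass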
                    for prev_cps in sg.cascaded_passes[: idx + 1]:
                        prev_cps.sram_used += sz

                    if callee.scratch_tensor is not None:
                        cps.sram_used += callee.scratch_tensor.storage_size()