# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Serialises and packs an NPU subgraph into tensors.
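#
# serialise_npu_subgraph_into_tensors() packs an NPU subgraph's constant data
# (weights, scales, LUTs and resident feature maps) and its driver command
# stream payload into flash/scratch memory tensors, while rewrite_npu_call_ops()
# wires those tensors into the CustomNpuOp calls of the CPU subgraph.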
import numpy as np

from . import driver_actions
from .data_type import DataType
from .nn_graph import PassPlacement
from .operation import Op
from .operation import Operation
from .tensor import MemArea
from .tensor import MemType
from .tensor import Tensor
from .tensor import TensorFormat
from .tensor import TensorPurpose


def make_memory_tensor(name, mem_area, mem_type, sz, want_values, arch):
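    """Create a 1D uint8 feature-map tensor of sz bytes in the given memory area/type.

    If want_values is True the tensor is backed by a zero-initialised numpy buffer
    that callers can copy payload data into.
    """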
    tens = Tensor([sz], DataType.uint8, name)
    tens.mem_area = mem_area
    tens.mem_type = mem_type
    tens.purpose = TensorPurpose.FeatureMap
    tens.set_format(TensorFormat.NHWC, arch)
    if want_values:
        tens.values = np.zeros(tens.shape, np.uint8)
    return tens


def copy_compressed_values_to_memory_tensor(memory_tensor, src_tensor):
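    """Copy a weight/scale tensor's compressed value streams into memory_tensor.

    The streams are written back-to-back, starting at src_tensor's allocated address.
    """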
    start_addr = src_tensor.address
    for compressed_values in src_tensor.compressed_values:
        end_addr = start_addr + len(compressed_values)
        memory_tensor.values[start_addr:end_addr] = compressed_values
        start_addr = end_addr


def copy_ifm_values_to_memory_tensor(memory_tensor, src_tensor):
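    """Copy a constant feature map (or LUT) into memory_tensor at its allocated address.

    Quantised values are used when present; multi-byte elements are reinterpreted
    as raw uint8 bytes before copying.
    """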
    start_addr = src_tensor.address
    values = src_tensor.quant_values.flatten() if src_tensor.quant_values is not None else src_tensor.values.flatten()
    if src_tensor.dtype.size_in_bytes() > 1:
        values = np.frombuffer(values.tobytes(), dtype=np.uint8)
    end_addr = start_addr + values.size
    memory_tensor.values[start_addr:end_addr] = values


def serialise_npu_subgraph_into_tensors(sg, arch, scratch_tens, scratch_fast_tens, flash_tens):
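    """Serialise an NPU subgraph into scratch, scratch_fast and flash memory tensors.

    The first NPU subgraph creates the memory tensors; subsequent subgraphs grow the
    ones passed in. Constant data and the driver command stream payload are packed
    into the flash tensor. Returns the (scratch, scratch_fast, flash) tensors.
    """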
    if sg.placement != PassPlacement.Npu:
        return scratch_tens, scratch_fast_tens, flash_tens

    flash_area = arch.permanent_storage_mem_area
    scratch_area = arch.feature_map_storage_mem_area
    scratch_fast_area = arch.fast_storage_mem_area

    flash_size = sg.memory_used.get(flash_area, 0)
    scratch_size = sg.memory_used.get(scratch_area, 0)

    payload_bytes = driver_actions.create_driver_payload(sg.register_command_stream, arch)

    command_stream_size_bytes = len(payload_bytes)

    if flash_tens == scratch_tens is None:
        # First Npu subgraph, create scratch and flash tensors
        sg.scratch_tensor = make_memory_tensor(
            sg.name + "_scratch", scratch_area, MemType.Scratch, scratch_size, False, arch
        )
        sg.scratch_tensor.purpose = TensorPurpose.Scratch
        sg.flash_tensor = make_memory_tensor(
            sg.name + "_flash", flash_area, MemType.Permanent_CPU, flash_size, True, arch
        )
        sg.scratch_fast_tensor = make_memory_tensor(
            sg.name + "_scratch_fast", scratch_fast_area, MemType.Scratch_fast, 0, False, arch
        )
        sg.scratch_fast_tensor.purpose = TensorPurpose.Scratch
    else:
        sg.scratch_tensor = scratch_tens
        sg.scratch_tensor.shape[0] += scratch_size
        sg.flash_tensor = flash_tens
        sg.flash_tensor.shape[0] += flash_size

        sg.scratch_fast_tensor = scratch_fast_tens
        sg.scratch_fast_tensor.shape[0] = 0

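    # Copy the constant data used by each NPU pass (weights, scales, LUTs and any
    # feature maps that are not placed in scratch memory) into the flash tensor at
    # the addresses they were allocated.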
    for cps in sg.cascaded_passes:
        for ps in cps.passes:
            if ps.placement == PassPlacement.Npu:
                if ps.weight_tensor is not None:
                    # For DMA ops, ps.weight_tensor is referring to the SRAM weight tensor and therefore the address
                    # is pointing at the destination address of where the weights should be placed in SRAM.
                    # This ensures that the Flash weight tensor is used instead and thus gets the correct address.
                    if ps.weight_tensor.ops[0].type == Op.DMA:
                        copy_compressed_values_to_memory_tensor(sg.flash_tensor, ps.weight_tensor.ops[0].inputs[0])
                    else:
                        copy_compressed_values_to_memory_tensor(sg.flash_tensor, ps.weight_tensor)

                    if ps.scale_tensor.ops[0].type == Op.DMA:
                        copy_compressed_values_to_memory_tensor(sg.flash_tensor, ps.scale_tensor.ops[0].inputs[0])
                    else:
                        copy_compressed_values_to_memory_tensor(sg.flash_tensor, ps.scale_tensor)

                if ps.lut_tensor is not None:
                    copy_ifm_values_to_memory_tensor(sg.flash_tensor, ps.lut_tensor)
                if ps.ifm_tensor is not None and ps.ifm_tensor.mem_type not in (MemType.Scratch, MemType.Scratch_fast):
                    copy_ifm_values_to_memory_tensor(sg.flash_tensor, ps.ifm_tensor)
                if ps.ifm2_tensor is not None and (
                    ps.ifm2_tensor.mem_type not in (MemType.Scratch, MemType.Scratch_fast)
                ):
                    copy_ifm_values_to_memory_tensor(sg.flash_tensor, ps.ifm2_tensor)

    sg.command_stream_tensor = make_memory_tensor(
        sg.name + "_command_stream", flash_area, MemType.Permanent_CPU, command_stream_size_bytes, True, arch
    )
    sg.command_stream_tensor.values = np.frombuffer(payload_bytes, dtype=np.uint8)

    return sg.scratch_tensor, sg.scratch_fast_tensor, sg.flash_tensor


def add_const_tens_to_startup_cascaded_pass(startup_cps, tens):
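    """Make tens a constant produced by the startup cascaded pass.

    A Const op that outputs the tensor is prepended to the first pass, and the
    tensor is added to the pass and cascaded pass outputs.
    """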
    op = Operation(Op.Const, tens.name + "_const")
    op.set_output_tensor(tens)
    startup_cps.passes[0].ops.insert(0, op)
    startup_cps.passes[0].outputs.insert(0, tens)
    startup_cps.outputs.insert(0, tens)


def rewrite_npu_call_ops(sg, arch):
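    """Hook the serialised NPU subgraph tensors into the CPU subgraph's call ops.

    For every CustomNpuOp the callee's scratch, scratch_fast, flash and command
    stream tensors are prepended as inputs, and the SRAM they occupy is accounted
    for in the affected cascaded passes.
    """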
    if sg.placement != PassPlacement.Cpu:
        return

    startup_cps = sg.cascaded_passes[0]

    for idx, cps in enumerate(sg.cascaded_passes):
        for ps in cps.passes:
            for op in ps.ops:
                if op.type == Op.CustomNpuOp:
                    callee = op.attrs["subgraph"]

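                    # Prepend the callee's memory tensors as inputs to the call op, the
                    # pass and the cascaded pass; the constant ones (flash and command
                    # stream) are also emitted from the startup pass as Const outputs.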
                    sz = 0
                    for tens in [
                        callee.scratch_fast_tensor,
                        callee.scratch_tensor,
                        callee.flash_tensor,
                        callee.command_stream_tensor,
                    ]:
                        op.inputs.insert(0, tens)
                        ps.inputs.insert(0, tens)
                        cps.inputs.insert(0, tens)
                        if tens != callee.scratch_tensor and tens != callee.scratch_fast_tensor:
                            add_const_tens_to_startup_cascaded_pass(startup_cps, tens)
                        sz += tens.storage_size()

                    for prev_cps in sg.cascaded_passes[: idx + 1]:
                        prev_cps.sram_used += sz

                    if callee.scratch_tensor is not None:
                        if callee.scratch_tensor.mem_area == MemArea.Sram:
                            cps.sram_used += callee.scratch_tensor.storage_size()

                    if callee.scratch_fast_tensor is not None:
                        if callee.scratch_fast_tensor.mem_area == MemArea.Sram:
                            cps.sram_used += callee.scratch_fast_tensor.storage_size()