# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Build a live range graph for tensors in one or more subgraphs. Used for tensor allocation as well as in the scheduler.
# Can work with either a pass packed subgraph or a scheduled subgraph.
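#
# A rough usage sketch (illustrative only; "mem_area" and "mem_type_set" stand in for
# whatever memory area and memory types the caller is allocating for):
#
#   lr_graph = extract_live_ranges_from_cascaded_passes(sg, mem_area, mem_type_set)
#   for rng in lr_graph.ranges.values():
#       ...  # rng.start_time, rng.end_time and rng.tensors drive address assignment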
from .high_level_command_stream_generator import calc_allowed_ofm_ifm_overlap_for_cascaded_pass
from .nn_graph import PassPlacement
from .tensor import MemType
from .tensor import Tensor


class LiveRange:
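    # Tracks the half-open time interval [start_time, end_time) during which a set of
    # tensors must stay resident in memory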
    def __init__(self, tens, alignment):
        self.tensors = []  # Tensors that are assigned to the same LiveRange will be allocated to the same address
        self.start_time = 99999999999
        self.end_time = -1
        self.size = 0
        self.name = ""
        self.alignment = alignment

        if tens:
            self.add_tensor(tens)

    def __str__(self):
        return "<live_range.LiveRange: '%s' start_time=%s, end_time=%s>" % (self.name, self.start_time, self.end_time)

    __repr__ = __str__

    def add_tensor(self, tens):
        if self.size == 0:
            self.size = tens.storage_size()
            self.name = tens.name  # LiveRange will be named after the first tensor added
        else:
            assert (
                self.size >= tens.storage_size()
            ), "Tensors assigned to the same LiveRange need to fit the size of the LiveRange."

        self.tensors.append(tens)

    def mark_usage(self, op_time):
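        # An op_time of -1 is treated as "not scheduled" and ignored; otherwise the usage
        # is recorded as the half-open interval [op_time, op_time + 1)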
        if op_time == -1:
            return
        op_time_start = op_time
        op_time_end = op_time + 1

        self.start_time = min(self.start_time, op_time_start)
        self.end_time = max(self.end_time, op_time_end)

    def overlaps_ranges(self, other):
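        # Two live ranges overlap when their [start_time, end_time) intervals intersect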
        return max(self.start_time, other.start_time) < min(self.end_time, other.end_time)

    def overlaps_address(self, other):
        # Returns the first pair of tensors in this LiveRange and 'other' which have
        # overlapping addresses
        for tens in self.tensors:
            for other_tens in other.tensors:
                if max(tens.address, other_tens.address) < min(
                    tens.address + self.size, other_tens.address + other.size
                ):
                    return True, tens, other_tens

        return False, None, None

    def __lt__(self, other):
        if self.start_time != other.start_time:
            return self.start_time < other.start_time
        if self.end_time != other.end_time:
            return self.end_time < other.end_time
        if self.size != other.size:
            return self.size < other.size
        return self.name < other.name

    def set_address(self, address):
        # Set address of all unaddressed tensors in LiveRange
        for tens in self.tensors:
            if tens.address is None:
                addr = address
            else:
                # Limit to a single tensor for the LiveRange if its address is already assigned
                assert len(self.tensors) == 1
                addr = tens.address
            tens.address = addr
            # Also need to set the address to the tensor's cpu/npu clones
            if tens.cpu_tensor is not None:
                tens.cpu_tensor.address = addr
            if tens.npu_tensor is not None:
                tens.npu_tensor.address = addr
        return addr

    def get_alignment(self):
        return self.alignment

    def set_alignment(self, alignment):
        self.alignment = max(self.alignment, alignment)


def merge_memory_op_ranges(sg, lr_graph, tensor_should_be_ignored, target_mem_area):
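    # Used by extract_live_ranges_from_passes; extract_live_ranges_from_cascaded_passes
    # below defines its own nested variant that also filters on MemType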
    for ps in sg.passes:
        if ps.placement == PassPlacement.MemoryOnly:
            # For memory-only passes, e.g. Reshape, add the input and output tensor to the same LiveRange
            input_tensor = ps.inputs[0]
            output_tensor = ps.outputs[0]
            # If the input or output tensor is tied to a Cpu tensor, i.e. a subgraph input
            # or output, fuse the live-range with the Cpu tensor's live-range instead.
            input_tensor = input_tensor.cpu_tensor if input_tensor.cpu_tensor is not None else input_tensor
            output_tensor = output_tensor.cpu_tensor if output_tensor.cpu_tensor is not None else output_tensor
            if not tensor_should_be_ignored(input_tensor, target_mem_area) and not tensor_should_be_ignored(
                output_tensor, target_mem_area
            ):
                lr_graph.fuse_ranges(input_tensor, output_tensor)


class LiveRangeGraph:
    def __init__(self):
        self.ranges = {}  # tens -> range
        self.allowed_overlaps = {}  # (tens,tens) -> overlap_int
        self.ignore_tensors = set()
        self.processed_subgraphs = set()
        self.current_time = 0

    def get_or_create_range(self, tens, alignment=Tensor.AllocationQuantum):
        for rng in self.ranges.values():
            # Return the live range of the tensor (or its cpu/npu clone)
            if any(tensor in rng.tensors for tensor in [tens, tens.npu_tensor, tens.cpu_tensor]):
                rng.set_alignment(alignment)
                return rng

        # No live range found for the tensor, create a new one
        rng = LiveRange(tens, alignment)
        self.ranges[tens] = rng
        return rng

    def fuse_ranges(self, in_tens, out_tens):
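        # Fold out_tens into in_tens' live range so both tensors end up at the same address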
        live_range = self.get_or_create_range(in_tens)
        assert out_tens not in self.ranges, out_tens
        live_range.add_tensor(out_tens)
        self.ranges[out_tens] = live_range
        return live_range


def extract_live_ranges_from_passes(
    sg,
    target_mem_area,
    mark_output_tensors_overlapping_with_input_tensors=False,
    ignore_subgraph_input_output_tensors=False,
):
    lr_graph = LiveRangeGraph()

    if ignore_subgraph_input_output_tensors:
        lr_graph.ignore_tensors.update(sg.input_tensors)
        lr_graph.ignore_tensors.update(sg.output_tensors)

    def tensor_should_be_ignored(tens, target_mem_area):
        if tens.mem_area != target_mem_area:
            return True
        if tens in lr_graph.ignore_tensors:
            return True
        if tens.name.endswith("reshape_shape_npu"):
            # Reshape tensor, no need to allocate
            lr_graph.ignore_tensors.add(tens)
            return True
        return False

    # Merge only memory operations in the NPU subgraphs
    if sg.placement == PassPlacement.Npu:
        merge_memory_op_ranges(sg, lr_graph, tensor_should_be_ignored, target_mem_area)

    for idx, ps in enumerate(sg.passes):
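        # Each pass is given an even time slot; the following odd slot is used for
        # element-wise outputs (see the output handling below)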
        ps.time = 2 * idx

        time_for_pass = ps.time

        for tens in ps.inputs:
            if tensor_should_be_ignored(tens, target_mem_area):
                continue
            rng = lr_graph.get_or_create_range(tens)
            rng.mark_usage(time_for_pass)

        for tens in ps.intermediates:
            if tensor_should_be_ignored(tens, target_mem_area):
                continue
            rng = lr_graph.get_or_create_range(tens)
            rng.mark_usage(time_for_pass)

        for tens in ps.outputs:
            if tensor_should_be_ignored(tens, target_mem_area):
                continue
            rng = lr_graph.get_or_create_range(tens)
            output_time = time_for_pass
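            # For element-wise passes the output is marked one time step after the inputs, so its
            # live range need not overlap theirs (unless overlapping is explicitly requested)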
            if not mark_output_tensors_overlapping_with_input_tensors and ps.is_element_wise:
                output_time += 1
            rng.mark_usage(output_time)

    end_time = len(sg.passes) * 2
    for tens in sg.output_tensors:
        if tensor_should_be_ignored(tens, target_mem_area):
            continue
        rng = lr_graph.get_or_create_range(tens)
        rng.mark_usage(end_time)

    return lr_graph


def extract_live_ranges_from_cascaded_passes(
    sg,
    target_mem_area,
    target_mem_type_set,
    mark_output_tensors_overlapping_with_input_tensors=False,
    use_ifm_ofm_overlap=True,
    ignore_subgraph_input_output_tensors=False,
    lr_graph=None,
    allocation_alignment=Tensor.AllocationQuantum,
):
    if lr_graph is None:
        lr_graph = LiveRangeGraph()

    if sg in lr_graph.processed_subgraphs:
        # If the subgraph has already been processed, return the lr_graph as is
        return lr_graph

    if ignore_subgraph_input_output_tensors:
        lr_graph.ignore_tensors.update(sg.input_tensors)
        lr_graph.ignore_tensors.update(sg.output_tensors)

    def tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
        if tens.mem_area != target_mem_area or tens.mem_type not in target_mem_type_set:
            return True
        if tens in lr_graph.ignore_tensors:
            return True
        if tens.name.endswith("reshape_shape_npu"):
            # Reshape tensor, no need to allocate
            lr_graph.ignore_tensors.add(tens)
            return True
        return False

    def merge_memory_op_ranges(sg, lr_graph, tensor_should_be_ignored, target_mem_area, target_mem_type_set):
        for ps in sg.passes:
            if ps.placement == PassPlacement.MemoryOnly:
                # For memory-only passes, e.g. Reshape, add the input and output tensor to the same LiveRange
                input_tensor = ps.inputs[0]
                output_tensor = ps.outputs[0]
                # If the input or output tensor is tied to a Cpu tensor, i.e. a subgraph input
                # or output, fuse the live-range with the Cpu tensor's live-range instead.
                input_tensor = input_tensor.cpu_tensor if input_tensor.cpu_tensor is not None else input_tensor
                output_tensor = output_tensor.cpu_tensor if output_tensor.cpu_tensor is not None else output_tensor
                if not tensor_should_be_ignored(input_tensor, target_mem_area, target_mem_type_set) and not (
                    tensor_should_be_ignored(output_tensor, target_mem_area, target_mem_type_set)
                ):
                    lr_graph.fuse_ranges(input_tensor, output_tensor)

    # Merge only memory operations in the NPU subgraphs
    if sg.placement == PassPlacement.Npu:
        merge_memory_op_ranges(sg, lr_graph, tensor_should_be_ignored, target_mem_area, target_mem_type_set)

    for cps in sg.cascaded_passes:
        cps.time = lr_graph.current_time

        time_for_pass = cps.time

        is_element_wise = cps.is_element_wise

        for tens in cps.inputs:
            if tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
                continue
            rng = lr_graph.get_or_create_range(tens, allocation_alignment)
            rng.mark_usage(time_for_pass)

        cps_primary_op = cps.passes[0].primary_op

        if cps_primary_op and cps_primary_op.type == "NpuOp" and MemType.Permanent_CPU not in target_mem_type_set:
            # If the primary op is an NpuOp, this is where an Npu subgraph is called. Go into
            # that subgraph and extract its live ranges before continuing.
            # Use the default allocation alignment of 16 for Npu tensors
            npu_sg = cps_primary_op.attrs["subgraph"]
            lr_graph = extract_live_ranges_from_cascaded_passes(
                npu_sg,
                target_mem_area,
                target_mem_type_set,
                mark_output_tensors_overlapping_with_input_tensors,
                use_ifm_ofm_overlap,
                False,
                lr_graph,
            )
            # Set the new time after handling the Npu subgraph
            time_for_pass = lr_graph.current_time
            cps.time = time_for_pass

        for tens in cps.intermediates:
            if tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
                continue
            rng = lr_graph.get_or_create_range(tens, allocation_alignment)
            rng.mark_usage(time_for_pass)

        for tens in cps.outputs:
            if tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
                continue
            rng = lr_graph.get_or_create_range(tens, allocation_alignment)
            output_time = time_for_pass
            if not mark_output_tensors_overlapping_with_input_tensors and is_element_wise:
                output_time += 1
            rng.mark_usage(output_time)

        if use_ifm_ofm_overlap:
            # Record the allowed overlap between the ifm and ofm tensors of this cascaded pass
            ifm_tensor = cps.passes[0].ifm_tensor
            ofm_tensor = cps.passes[-1].ofm_tensor
            if (
                ifm_tensor is not None
                and ofm_tensor is not None
                and not tensor_should_be_ignored(ifm_tensor, target_mem_area, target_mem_type_set)
                and not tensor_should_be_ignored(ofm_tensor, target_mem_area, target_mem_type_set)
            ):
                lr_graph.allowed_overlaps[(ifm_tensor, ofm_tensor)] = calc_allowed_ofm_ifm_overlap_for_cascaded_pass(
                    cps
                )

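        # Each cascaded pass consumes two time steps: the even slot for the pass itself and
        # the odd slot used by element-wise outputs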
        lr_graph.current_time += 2

    end_time = 0
    for rng in lr_graph.ranges.values():
        # Find the maximum end time of all live-ranges in the graph
        end_time = max(end_time, rng.end_time)

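    # Extend the live ranges of the subgraph's output tensors to the final time step, so
    # they stay allocated until the whole subgraph has finished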
    for tens in sg.output_tensors:
        if tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
            continue
        rng = lr_graph.get_or_create_range(tens, allocation_alignment)
        rng.mark_usage(end_time)

    # Add subgraph to set of processed subgraphs
    lr_graph.processed_subgraphs.add(sg)
    return lr_graph