# SPDX-FileCopyrightText: Copyright 2020-2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Build a live range graph for tensors in one or more subgraphs. Used for tensor allocation as well as in the scheduler.
# Can work with either a pass-packed subgraph or a scheduled subgraph.
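#
# A minimal usage sketch (assuming a prepared subgraph `sg` and chosen target
# memory area/type set):
#
#   lr_graph = extract_live_ranges_from_cascaded_passes(sg, mem_area, mem_type_set)
#   usage = lr_graph.get_temporal_memory_usage(mem_area)  # bytes live per timestep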
from collections import namedtuple
from typing import List

import numpy as np

from .operation import Op
from .tensor import MemArea
from .tensor import MemType
from .tensor import Tensor
from .tensor import TensorPurpose
from .utils import progress_print


class LiveRange:
    def __init__(self, tens, alignment):
        self.tensors = []  # Tensors that are assigned to the same LiveRange will be allocated to the same address
        self.start_time = 99999999999
        self.end_time = -1
        self.size = 0
        self.name = ""
        self.alignment = alignment
        self.mem_area = tens.mem_area if tens else MemArea.Unknown

        if tens:
            self.add_tensor(tens)

    def __str__(self):
        return (
            f"<live_range.LiveRange: {self.start_time}-{self.end_time}, "
            f"size={self.size}, '{self.name}' #:{len(self.tensors)}>"
        )

    __repr__ = __str__

    def add_tensor(self, tens):
        if self.size == 0:
            self.size = tens.storage_size()
            self.name = tens.name  # LiveRange will be named after the first tensor added
        else:
            assert (
                self.size >= tens.storage_size()
            ), "Tensors assigned to the same LiveRange need to fit the size of the LiveRange."

        self.tensors.append(tens)

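    # Extend this live range to cover op_time .. op_time + op_length on the
    # graph's shared timeline. A negative start time (e.g. a pre-buffered
    # weight tensor starting one timestep before its op) is clamped to zero.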
    def mark_usage(self, op_time, op_length=1):
        op_time_start = max(op_time, 0)
        op_time_end = op_time + op_length
        if op_time_end < op_time_start:
            return

        self.start_time = min(self.start_time, op_time_start)
        self.end_time = max(self.end_time, op_time_end)

    def set_buffer_size(self, buffer_size):
        self.size = buffer_size
        self.mem_area = MemArea.Sram

    def overlaps_ranges(self, other):
        return max(self.start_time, other.start_time) < min(self.end_time, other.end_time)

    def overlaps_address(self, other):
        # Returns the first pair of tensors in this LiveRange and 'other' which have
        # overlapping addresses
        for tens in self.tensors:
            for other_tens in other.tensors:
                if max(tens.address, other_tens.address) < min(
                    tens.address + self.size, other_tens.address + other.size
                ):
                    return True, tens, other_tens

        return False, None, None

    def __lt__(self, other):
        if self.start_time != other.start_time:
            return self.start_time < other.start_time
        if self.end_time != other.end_time:
            return self.end_time < other.end_time
        if self.size != other.size:
            return self.size < other.size
        return self.name < other.name

    def set_address(self, address):
        # Set address of all tensors in LiveRange
        for tens in self.tensors:
            tens.address = address

        return address

    def get_alignment(self):
        return self.alignment

    def set_alignment(self, alignment):
        self.alignment = max(self.alignment, alignment)


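# Maps tensors (and their clones) to LiveRanges on a single shared timeline.
# The timeline advances in steps of two per pass/op, which leaves the
# in-between timestep free for tensors that must become live early, such as
# pre-buffered weight tensors.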
class LiveRangeGraph:
    def __init__(self):
        self.lrs: List[LiveRange] = []  # List of all created ranges
        self.ranges = {}  # tens -> range
        self.processed_subgraphs = set()
        self.current_time = 0
        self.end_time = None

    def get_or_create_range(self, tens, alignment=Tensor.AllocationQuantum):
        # Return the live range of the tensor (or any of its clones)
        for existing_tensor, rng in self.ranges.items():
            if tens.equivalent(existing_tensor):
                rng.set_alignment(alignment)
                return rng

        # No live range found for the tensor, create a new one
        rng = LiveRange(tens, alignment)
        self.ranges[tens] = rng
        self.lrs.append(rng)
        return rng

    def fuse_ranges(self, in_tens, out_tens):
        live_range = self.get_or_create_range(in_tens)
        assert out_tens not in self.ranges, out_tens
        live_range.add_tensor(out_tens)
        self.ranges[out_tens] = live_range
        return live_range

    def get_endtime(self):
        # op_length is 1 so max end time for lr is current + 1
        return self.current_time + 1

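    # Example (hypothetical values): two Sram ranges of 100 bytes covering
    # times 0-2 and 2-4 give usage [100, 100, 200, 100, 100, ...]; the peak
    # is where the ranges overlap.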
    def get_temporal_memory_usage(self, target_mem_area):
        usage = np.zeros(self.get_endtime() + 1, dtype=np.int32)
        for lr in self.lrs:
            if lr.mem_area == target_mem_area:
                # End time is inclusive
                assert lr.end_time <= self.get_endtime() + 1
                usage[lr.start_time : lr.end_time + 1] += lr.size

        return usage


def tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
    if tens.purpose == TensorPurpose.Virtual:
        return True
    if target_mem_area is None or target_mem_type_set is None:
        return False
    if tens.mem_area != target_mem_area or tens.mem_type not in target_mem_type_set:
        return True
    return False


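# Return the input tensor of sched_op whose live range can be fused with the
# output's, allowing the op to write its result in place over that input, or
# None if no input qualifies.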
def _get_ifm_to_fuse(sched_op, target_mem_area=None, target_mem_type_set=None):
    ifm_tens = None
    elem_op = sched_op.parent_op
    if sched_op.op_type.is_elementwise_op() and elem_op.memory_function is not Op.VariableTensorWrite:
        # Check if possible to merge ifm/ofm live ranges of elementwise op
        if not tensor_should_be_ignored(elem_op.ofm, target_mem_area, target_mem_type_set):
            # Check if overwriting the inputs can be allowed
            OpShapeTens = namedtuple("OpShapeTens", ["op_shape", "tens"])
            outp = OpShapeTens(elem_op.ofm_shapes[0], elem_op.ofm)
            inps = []
            if elem_op.ifm is not None:
                inps.append(OpShapeTens(elem_op.ifm_shapes[0], elem_op.ifm))
            if elem_op.ifm2 is not None:
                inps.append(OpShapeTens(elem_op.ifm_shapes[1], elem_op.ifm2))
            # find an input tensor that can be overwritten by the output
            for inp in inps:
                if (
                    # check op input and output shapes allow overlapping
                    inp.op_shape == outp.op_shape
                    # check input tensor is valid
                    and inp.tens is not None
                    and inp.tens.shape != []
                    and not inp.tens.ifm_write_protected
                    and not tensor_should_be_ignored(inp.tens, target_mem_area, target_mem_type_set)
                    # check input and output tensors are compatible
                    and inp.tens.format == outp.tens.format
                    and inp.tens.dtype == outp.tens.dtype
                    # check input tensor only has one consumer
                    and len(inp.tens.consumer_list) == 1
                    # check output tensor only has one producer
                    and len(outp.tens.ops) == 1
                ):
                    ifm_tens = inp.tens
                    break
    elif sched_op.op_type == Op.Memcpy:
        # Check if possible to merge ifm/ofm live ranges of dma op
        dma_op = sched_op.parent_op
        ifm = dma_op.ifm
        ofm = dma_op.ofm
        if not (
            tensor_should_be_ignored(ifm, target_mem_area, target_mem_type_set)
            or tensor_should_be_ignored(ofm, target_mem_area, target_mem_type_set)
            # input tensor only allowed to have one consumer
            or len(ifm.consumer_list) > 1
        ):
            # Currently DMA is only used when bypassing memory-only ops, so it is
            # ok to reuse the ifm if it has only one consumer
            ifm_tens = ifm

    return ifm_tens


def ofm_can_reuse_ifm(sched_op, target_mem_area=None, target_mem_type_set=None):
    ifm = _get_ifm_to_fuse(sched_op, target_mem_area, target_mem_type_set)
    return ifm is not None


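# Fuse the ifm/ofm live ranges of an elementwise or Memcpy op when the ofm is
# allowed to overwrite the ifm; both tensors then share one LiveRange and will
# be allocated to the same address.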
def merge_elementwise_op_ranges(sg, sched_op, lr_graph, target_mem_area, target_mem_type_set):
    ifm = _get_ifm_to_fuse(sched_op, target_mem_area, target_mem_type_set)
    if ifm:
        lr_graph.fuse_ranges(ifm, sched_op.parent_op.ofm)


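# Build (or extend) a live range graph by walking the cascaded passes of the
# subgraph in execution order, recursing into NPU and other subgraphs as they
# are encountered. Subgraphs already present in lr_graph are skipped.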
def extract_live_ranges_from_cascaded_passes(
    sg,
    target_mem_area,
    target_mem_type_set,
    lr_graph=None,
    cpu_tensor_alignment=Tensor.AllocationQuantum,
    verbose_progress: bool = False,
):
    if lr_graph is None:
        lr_graph = LiveRangeGraph()

    if sg in lr_graph.processed_subgraphs:
        # if subgraph has been processed already, return the lr_graph as is
        return lr_graph

    for index, cps in enumerate(sg.cascaded_passes):
        progress_print(verbose_progress, "Processing cascaded pass", index, sg.cascaded_passes)
        cps.time = lr_graph.current_time

        time_for_pass = cps.time

        for tens in cps.inputs:
            if tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
                continue
            rng = lr_graph.get_or_create_range(tens, cpu_tensor_alignment)
            rng.mark_usage(time_for_pass)

        op = cps.passes[0].ops[0] if cps.passes[0].ops else None
        op_subgraph = op.attrs.get("subgraph", None) if op else None

        if op_subgraph is not None and MemType.Permanent_CPU not in target_mem_type_set:
            if op.type == Op.CustomNpuOp:
                # If the primary-op is an NpuOp that means this is where an Npu subgraph
                # is called. Go into said subgraph and extract live ranges before continuing.
                # Use default allocation alignment of 16 for Npu tensors
                lr_graph = extract_live_ranges_from_schedule(
                    op_subgraph, target_mem_area, target_mem_type_set, lr_graph
                )
            else:
                # The op has one or more subgraphs in it (a typical op is the While op)
                # Go into all subgraphs and extract live ranges before continuing.
                for op_sg in op_subgraph:
                    lr_graph = extract_live_ranges_from_cascaded_passes(
                        op_sg, target_mem_area, target_mem_type_set, lr_graph, cpu_tensor_alignment
                    )
            # Set the new time after handling the Npu subgraph
            # current time is updated in subgraph path so do not tick the time
            time_for_pass = lr_graph.current_time
            cps.time = time_for_pass
        else:
            lr_graph.current_time += 2

        for tens in cps.intermediates + cps.outputs:
            if tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
                continue
            rng = lr_graph.get_or_create_range(tens, cpu_tensor_alignment)
            rng.mark_usage(time_for_pass)

    time_to_set = lr_graph.current_time
    for tens in sg.output_tensors:
        if tensor_should_be_ignored(tens, target_mem_area, target_mem_type_set):
            continue
        rng = lr_graph.get_or_create_range(tens, cpu_tensor_alignment)
        rng.mark_usage(time_to_set)

    # Variable tensor live-range is for entire inference
    for tens, rng in lr_graph.ranges.items():
        if tens.is_variable:
            rng.mark_usage(0, time_to_set + 1)

    # Add subgraph to set of processed subgraphs
    lr_graph.processed_subgraphs.add(sg)
    return lr_graph


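# Build a live range graph in which every tensor of the subgraph is marked at
# one and the same timestep, so all ranges overlap and the allocator must give
# each tensor its own address space.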
def create_linear_live_range_graph(sg, target_mem_area, target_mem_type_set, lr_graph):
    assert lr_graph is not None
    sg_time = lr_graph.current_time
    for ps in sg.passes:
        for tens in ps.inputs + ps.outputs + ps.intermediates:
            if tens.purpose == TensorPurpose.Weights or tensor_should_be_ignored(
                tens, target_mem_area, target_mem_type_set
            ):
                continue
            rng = lr_graph.get_or_create_range(tens)
            rng.mark_usage(sg_time)

    for _, op_info in sg.schedule.cost_map.items():
        for tensor in [op_info.npu_weights_tensor, op_info.npu_scales_tensor]:
            if tensor and not tensor_should_be_ignored(tensor, target_mem_area, target_mem_type_set):
                rng = lr_graph.get_or_create_range(tensor)
                rng.mark_usage(sg_time)

    lr_graph.current_time += 1
    return lr_graph


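# Build a live range graph from a scheduled subgraph. Ops in the same cascade
# share one time index, rolling buffers within a cascade get a reduced buffer
# size, and buffered weight tensors get their start time/length adjusted so
# that prefetch can overlap the preceding op.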
def extract_live_ranges_from_schedule(sg, target_mem_area, target_mem_type_set, lr_graph, verbose_progress=False):
    time_for_cascade = {}
    for index, sched_op in enumerate(sg.sched_ops):
        progress_print(verbose_progress, "Processing SchedulerOp", index, sg.sched_ops)
        op_info = sg.schedule.cost_map[sched_op]
        cascade = op_info.cascade
        cascade_info = sg.schedule.cascades.get(cascade, None)

        if cascade_info is None:
            # Op is not part of a cascade, check if the ifm can be overwritten by the ofm
            merge_elementwise_op_ranges(sg, sched_op, lr_graph, target_mem_area, target_mem_type_set)

        time_to_set = time_for_cascade.get(cascade, lr_graph.current_time)

        op_info.time_index = time_to_set

        # Mark usage for all tensors related to this Pass
        ps = sched_op.parent_ps
        for tens in ps.inputs + ps.outputs + ps.intermediates:
            if (
                target_mem_area == MemArea.Sram
                and cascade_info
                and tens == ps.ifm_tensor
                and sched_op in cascade_info.buffers
            ):
                # This tensor is a rolling buffer in a cascade and the size of the LiveRange needs to be modified
                # for enabling temporal memory snapshots without modifying the original Tensor
                rng = lr_graph.get_or_create_range(tens)
                rng.set_buffer_size(cascade_info.buffers[sched_op].elements() * sched_op.ifm.dtype.size_in_bytes())
            elif (
                tens.purpose == TensorPurpose.Weights
                or tens.purpose == TensorPurpose.FSBias
                or tens.mem_type not in target_mem_type_set
                or tens.mem_area != target_mem_area
            ):
                continue
            else:
                rng = lr_graph.get_or_create_range(tens)

            rng.mark_usage(time_to_set)

        for idx, weight_tens in enumerate(op_info.buffered_weight_tensors):
            if weight_tens.mem_type in target_mem_type_set and weight_tens.mem_area == target_mem_area:
                rng = lr_graph.get_or_create_range(weight_tens)
                start_time = time_to_set
                length = 1
                if weight_tens.pre_buffer:
                    start_time -= 1
                    length += 1
                if len(op_info.buffered_weight_tensors) > 1:
                    last_idx = len(op_info.ofm_depth_slices) % len(op_info.buffered_weight_tensors)
                    # Double buffering: reduce end time of the buffer that is not used last
                    if last_idx != idx:
                        length -= 1
                rng.mark_usage(start_time, length)

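        # Advance the shared timeline only if this op was placed at the current
        # time; ops later in a cascade reuse the cascade's time index. Stepping
        # by two keeps the previous timestep free for weight pre-buffering
        # (start_time -= 1 above).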
        if time_to_set == lr_graph.current_time:
            lr_graph.current_time += 2

        if cascade != 0:
            time_for_cascade[cascade] = time_to_set

    time_to_set = lr_graph.current_time
    for tens in sg.output_tensors:
        if tens.mem_type not in target_mem_type_set or tens.mem_area != target_mem_area:
            continue
        rng = lr_graph.get_or_create_range(tens)
        rng.mark_usage(time_to_set)

    return lr_graph