blob: ab72fbcde6859e0e06d955cf493ab97186c6a439 [file] [log] [blame]
Tim Hall79d07d22020-04-27 18:20:16 +01001# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
2#
3# SPDX-License-Identifier: Apache-2.0
4#
5# Licensed under the Apache License, Version 2.0 (the License); you may
6# not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an AS IS BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
Tim Hall79d07d22020-04-27 18:20:16 +010016# Description:
17# Generate a high-level command stream from a scheduled subgraph with CascadedPasses.
18#
19# Also used during scheduling to work out allowable IFM/OFM overlap, this functionality can be accessed using
20# calc_allowed_ofm_ifm_overlap_for_cascaded_pass().
Diego Russoe8a10452020-04-21 17:39:10 +010021from .high_level_command_stream import Box
22from .high_level_command_stream import DMA
23from .high_level_command_stream import NpuStripe
24from .nn_graph import PassPlacement
25from .nn_graph import SchedulingStrategy
Tim Hall79d07d22020-04-27 18:20:16 +010026from .operation import NpuBlockType
Charles Xu78792222020-05-13 10:15:26 +020027from .tensor import TensorPurpose
Tim Hall79d07d22020-04-27 18:20:16 +010028
29
def dma_if_necessary(ps, box, tensor):
    # Yield a DMA command for `tensor` if its data lives in a memory that
    # must be transferred (e.g. weights in slower storage) before use.
    # Yields nothing when no transfer is required.
    if not tensor.needs_dma():
        return
    source_tensor = tensor.ops[0].inputs[0]
    yield DMA(source_tensor, tensor, box)
Tim Hall79d07d22020-04-27 18:20:16 +010035
def match_tensor(source, derived):
    """Return True when `derived` is `source` itself, or is produced from
    `source` by exactly one SplitSliceRead op."""
    if source == derived:
        return True
    producers = derived.ops
    if len(producers) != 1:
        return False
    producer = producers[0]
    return producer.type == "SplitSliceRead" and source == producer.inputs[0]
Tim Hall79d07d22020-04-27 18:20:16 +010044
def generate_high_level_command_stream_for_pass(strat, passes, block_configs, idx):
    """Generate the high-level commands (DMA + NpuStripe) for passes[idx].

    strat: SchedulingStrategy.WeightStream or SchedulingStrategy.IfmStream.
    passes: the pass list of the cascaded sequence being generated.
    block_configs: per-pass block configurations, parallel to `passes`.
    idx: index of the pass to generate for. For IfmStream this generator
        recurses into idx - 1 so producer stripes are yielded on demand,
        interleaved with this pass's stripes.

    Yields DMA and NpuStripe command objects. Note: mutates ps.primary_op
    attrs (fused_memory_function / fused_activation_function) and may swap
    ps.ifm_tensor/ps.ifm2_tensor as a side effect.
    """
    is_first = idx == 0
    is_last = idx == len(passes) - 1
    ps = passes[idx]
    block_config = block_configs[idx]
    npu_block_type = ps.npu_block_type
    split_offsets = [None, None]  # offset for [ifm, ifm2]

    # Record the read offsets of fused SplitSliceRead ops and flag the
    # primary op as having a fused memory function.
    ifm_idx = 0
    for op in ps.ops:
        if op.type == "SplitSliceRead":
            split_offsets[ifm_idx] = op.attrs["split_start"]
            ps.primary_op.attrs["fused_memory_function"] = op.type
            ifm_idx += 1

    if len(ps.inputs) == 2 and npu_block_type == NpuBlockType.ElementWise:
        # Ensure correct ifm and ifm2 order: if the pass inputs are the
        # primary op's inputs in reversed order, swap tensors and offsets.
        if (match_tensor(ps.inputs[0], ps.primary_op.inputs[1]) and
            match_tensor(ps.inputs[1], ps.primary_op.inputs[0])):
            ps.ifm_tensor, ps.ifm2_tensor = ps.ifm2_tensor, ps.ifm_tensor
            split_offsets[0], split_offsets[1] = split_offsets[1], split_offsets[0]

    ifm_tensor = ps.ifm_tensor
    ifm2_tensor = ps.ifm2_tensor
    ofm_tensor = ps.ofm_tensor
    weight_tensor = ps.weight_tensor
    scale_tensor = ps.scale_tensor

    # Start with a box covering the whole OFM; narrowed per stripe below.
    ofm_start = [0] * len(ofm_tensor.shape)
    ofm_end = list(ofm_tensor.shape)

    strides = None
    skirt = None
    upscaling = 1
    if ps.primary_op is not None:
        strides = ps.primary_op.attrs.get("strides", None)
        skirt = ps.primary_op.attrs.get("skirt", None)
        if ps.primary_op.type in set(("Conv2DBackpropInputSwitchedBias", "ResizeBilinear")):
            # Upscaling ops: OFM height/width are an integer multiple of IFM's.
            upscaling = ofm_tensor.shape[-3] // ifm_tensor.shape[-3]
            assert ofm_tensor.shape[-2] == (ifm_tensor.shape[-2] * upscaling)

    concat_axis = 0
    concat_offset = 0

    # Fusable activation functions
    activation_ops = set(("Sigmoid", "Tanh", "Relu", "Relu6", "ReluN1To1"))

    # Apply fused ConcatSliceWrite (narrows the OFM box to the concat slice)
    # and fused activation functions onto the primary op.
    for op in ps.ops:
        if op.type == "ConcatSliceWrite":
            concat_axis = op.attrs["concat_axis"]
            concat_start = op.attrs["concat_start"]
            concat_end = op.attrs["concat_end"]

            ofm_start[concat_axis] = concat_start
            ofm_end[concat_axis] = concat_end
            concat_offset = concat_start
            ps.primary_op.attrs["fused_memory_function"] = op.type
        elif op.type in activation_ops:
            ps.primary_op.attrs["fused_activation_function"] = op.type

    if strat == SchedulingStrategy.WeightStream:
        # Weight streaming: stripe the OFM along its last axis (depth),
        # DMA-ing the matching weight slice ahead of each stripe.
        ofm_step = block_config[-1]
        ofm_stop = ofm_end[-1]
        if weight_tensor is None or not weight_tensor.needs_dma():
            # No weight DMA needed, so a single full-depth stripe suffices.
            ofm_step = ofm_stop
        for start in range(ofm_start[-1], ofm_stop, ofm_step):
            end = min(start + ofm_step, ofm_stop)
            ofm_start[-1] = start
            ofm_end[-1] = end
            ofm_box = Box(ofm_start, ofm_end)
            ifm_box = None
            ifm2_box = None

            if ifm_tensor.shape != []:
                ifm_box, _, _ = ofm_box.transform_with_strides_and_skirt(
                    strides, skirt, ifm_tensor.shape, npu_block_type, concat_axis, concat_offset, split_offsets[0], upscaling
                )
            else:
                ifm_box = Box([], [])
            if ifm2_tensor is not None and ifm2_tensor.shape != []:
                ifm2_box, _, _ = ofm_box.transform_with_strides_and_skirt(
                    strides, skirt, ifm2_tensor.shape, npu_block_type, concat_axis, concat_offset, split_offsets[1], upscaling
                )
            else:
                ifm2_box = Box([], [])

            # DMA any feature-map intermediates needed for this stripe.
            for intermediate in ps.intermediates:
                if intermediate != None and intermediate.shape != [] and intermediate.purpose == TensorPurpose.FeatureMap:
                    intermediate_box, _, _ = ofm_box.transform_with_strides_and_skirt(
                        strides, skirt, intermediate.shape, npu_block_type, concat_axis, concat_offset, split_offsets[0], upscaling
                    )
                    yield from dma_if_necessary(ps, intermediate_box, intermediate)

            weight_box = None
            if weight_tensor is not None:
                weight_oc_start = start
                weight_oc_end = end
                if concat_axis - len(weight_tensor.shape) == -1:
                    # Concat along the output-channel axis: weight output
                    # channels are indexed relative to the concat offset.
                    weight_oc_start -= concat_offset
                    weight_oc_end -= concat_offset

                weight_box = Box.make_weight_box(
                    weight_tensor.shape,
                    npu_block_type,
                    weight_oc_start,
                    weight_oc_end,
                    weight_tensor.weight_transpose_depthwise,
                )
                yield from dma_if_necessary(ps, weight_box, weight_tensor)

            yield NpuStripe(
                ps,
                block_config,
                is_first,
                is_last,
                True,
                True,
                ifm_tensor,
                ifm_box,
                ofm_tensor,
                ofm_box,
                weight_tensor,
                weight_box,
                scale_tensor,
                concat_axis,
                concat_offset,
                ifm2_tensor=ifm2_tensor,
                ifm2_box=ifm2_box,
            )

    elif strat == SchedulingStrategy.IfmStream:
        # IFM streaming: stripe the OFM along its height (rows), pulling
        # rows from the previous pass on demand via recursion.
        y_step = block_config[0]
        y_start = 0
        y_dim = 1
        if len(ofm_tensor.shape) >= 3:
            y_start = ofm_start[-3]
            y_dim = ofm_end[-3]
        if idx > 0:
            # Cascaded: produce the previous pass lazily as rows are needed.
            ifm_y_present = 0
            prev_pass = passes[idx - 1]
            prev_pass_gen = generate_high_level_command_stream_for_pass(strat, passes, block_configs, idx - 1)
        else:
            # First pass: the whole IFM is already available.
            ifm_y_present = 1
            if len(ifm_tensor.shape) >= 3:
                ifm_y_present = ifm_tensor.shape[-3]
            prev_pass_gen = []
            prev_pass = None

        if len(passes) == 1:
            # no cascading, can just issue one big stripe
            # but only if we've done allocation and OFM does not overlap IFM
            if ifm_tensor.address != -1 and ofm_tensor.address != -1:
                if (
                    ifm_tensor.address + ifm_tensor.storage_size() <= ofm_tensor.address
                    or ofm_tensor.address + ofm_tensor.storage_size() <= ifm_tensor.address
                ):
                    y_step = y_dim

        weight_box = None

        for start in range(y_start, y_dim, y_step):
            end = min(start + y_step, y_dim)
            if len(ofm_tensor.shape) >= 3:
                ofm_start[-3] = start
                ofm_end[-3] = end
            ofm_box = Box(ofm_start, ofm_end)

            # Kernel height determines how many extra IFM rows each OFM
            # row needs (pooling uses ksize, convs use the weight shape).
            k_height = 1
            if npu_block_type == NpuBlockType.Pooling:
                if ps.primary_op is not None:
                    k_height = ps.primary_op.attrs["ksize"][1]
            else:
                if weight_tensor is not None:
                    k_height = weight_tensor.shape[0]

            ifm_box, pad_top, pad_bottom = ofm_box.transform_with_strides_and_skirt(
                strides, skirt, ifm_tensor.shape, npu_block_type, concat_axis, concat_offset, split_offsets[0], k_height, upscaling
            )

            # DMA any feature-map intermediates needed for this stripe.
            for intermediate in ps.intermediates:
                if intermediate != None and intermediate.shape != [] and intermediate.purpose == TensorPurpose.FeatureMap:
                    intermediate_box, _, _ = ofm_box.transform_with_strides_and_skirt(
                        strides, skirt, intermediate.shape, npu_block_type, concat_axis, concat_offset, split_offsets[0], upscaling
                    )
                    yield from dma_if_necessary(ps, intermediate_box, intermediate)

            # If the producer hasn't yet emitted enough IFM rows, drain its
            # generator until this stripe's input rows are available.
            ifm_y_needed = 1
            if len(ifm_box.end_coord) >= 3:
                ifm_y_needed = ifm_box.end_coord[-3]
            if ifm_y_present < ifm_y_needed:
                for prev_cmd in prev_pass_gen:
                    yield prev_cmd
                    rng = prev_cmd.get_ofm_y_range_for_pass(prev_pass)
                    if rng is not None:
                        ifm_y_present = max(ifm_y_present, rng[1])
                        if ifm_y_present >= ifm_y_needed:
                            break

            # Weights cover the whole pass; DMA them once, before the first
            # stripe that needs them.
            if weight_tensor is not None and weight_box is None:
                weight_box = Box.make_weight_box(
                    weight_tensor.shape, npu_block_type, weights_transposed=weight_tensor.weight_transpose_depthwise
                )
                yield from dma_if_necessary(ps, weight_box, weight_tensor)

            # Check if first/last stripe in pass
            is_first_h_stripe = start == y_start
            is_last_h_stripe = (start + y_step) >= y_dim

            stripe = NpuStripe(
                ps,
                block_config,
                is_first,
                is_last,
                is_first_h_stripe,
                is_last_h_stripe,
                ifm_tensor,
                ifm_box,
                ofm_tensor,
                ofm_box,
                weight_tensor,
                weight_box,
                scale_tensor,
                concat_axis,
                concat_offset,
                None,
                None,
                pad_top,
                pad_bottom,
            )
            yield stripe
    else:
        assert 0, "unknown scheduling strategy"
277
278
def generate_high_level_command_stream_for_pass_list(strat, passes, block_configs):
    """Yield the high-level commands for a list of scheduled passes."""
    if strat == SchedulingStrategy.WeightStream:
        # Weight streaming: every pass is generated independently, in order.
        for pass_idx, _ in enumerate(passes):
            yield from generate_high_level_command_stream_for_pass(strat, passes, block_configs, pass_idx)
    elif strat == SchedulingStrategy.IfmStream:
        # IFM streaming: start from the final pass; it recursively pulls
        # stripes from its predecessors as their output rows are required.
        last_idx = len(passes) - 1
        yield from generate_high_level_command_stream_for_pass(strat, passes, block_configs, last_idx)
    else:
        assert 0, "Unknown streaming strategy"
287
288
def generate_high_level_command_stream_for_cascaded_pass(cps):
    """Yield the high-level commands for a single CascadedPass."""
    block_configs = [ps.block_config for ps in cps.passes]
    yield from generate_high_level_command_stream_for_pass_list(cps.strategy, cps.passes, block_configs)
293
294
def generate_high_level_command_stream(nng, sg, arch, verbose_high_level_command_stream):
    """Build the high-level command stream for subgraph `sg` and store it on
    sg.high_level_command_stream. Only NPU-placed cascaded passes contribute
    commands; optionally prints the stream when the verbose flag is set."""
    commands = []
    for cascaded_pass in sg.cascaded_passes:
        if cascaded_pass.placement != PassPlacement.Npu:
            continue
        commands.extend(generate_high_level_command_stream_for_cascaded_pass(cascaded_pass))

    sg.high_level_command_stream = commands
    if verbose_high_level_command_stream:
        sg.print_high_level_command_stream()
304
305
def calc_allowed_ofm_ifm_overlap_for_pass_list(strat, passes, block_configs):
    """Return the number of bytes by which the final OFM may safely overlap
    the first IFM in storage for this pass list.

    Simulates the command stream and, at each first/last-stripe boundary,
    tracks the highest OFM write offset against the lowest unread IFM offset;
    the minimum slack over all stripes is the allowed overlap. Returns 0 when
    tensors are missing, addresses cannot be resolved, or for WeightStream
    (where no overlap is permitted).
    """
    if not passes[0].ifm_tensor or not passes[-1].ofm_tensor:
        return 0
    if strat == SchedulingStrategy.WeightStream:
        return 0

    highest_ofm_write = 0
    # Bug fix: the original assigned the bound method (`.storage_size`)
    # instead of calling it, so `ifm_read` started as a method object rather
    # than a byte count. Call it so the fallback value is a valid int.
    ifm_read = passes[0].ifm_tensor.storage_size()
    min_overlap = 999999999999999999999  # effectively +infinity
    ofm_size = passes[-1].ofm_tensor.storage_size()
    for cmd in generate_high_level_command_stream_for_pass_list(strat, passes, block_configs):
        if cmd.is_npu_pass_command():
            if cmd.is_first:
                # First pass reads the IFM from here on.
                ifm_read = cmd.ifm_tensor.address_offset_for_coordinate(cmd.ifm_box.start_coord, is_top_box=False)
                if ifm_read is None:
                    return 0
            if cmd.is_last:
                # Last pass writes the OFM up to here.
                write_offset = cmd.ofm_tensor.address_offset_for_coordinate(cmd.ofm_box.end_coord, is_top_box=True)
                if write_offset is None:
                    return 0
                highest_ofm_write = max(write_offset, highest_ofm_write)

            if cmd.is_first or cmd.is_last:
                # Slack between what has been written and what is still unread.
                overlap_required = max(highest_ofm_write - min(ifm_read, ofm_size), 0)
                can_overwrite = ofm_size - overlap_required
                min_overlap = min(min_overlap, can_overwrite)

            if cmd.is_first:
                # After this stripe, everything below its end has been read.
                ifm_read = cmd.ifm_tensor.address_offset_for_coordinate(cmd.ifm_box.end_coord, is_top_box=True)

    min_overlap = max(min_overlap, 0)
    return min_overlap
338
339
def calc_allowed_ofm_ifm_overlap_for_cascaded_pass(cps):
    """Convenience wrapper: allowed OFM/IFM overlap for a CascadedPass."""
    block_configs = [ps.block_config for ps in cps.passes]
    return calc_allowed_ofm_ifm_overlap_for_pass_list(cps.strategy, cps.passes, block_configs)