blob: 988e52e6b5f029d4ce4fc4f7feab7502f2421d64 [file] [log] [blame]
Patrik Gustavssone3b1b912021-02-09 15:38:46 +01001# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
Tim Hall79d07d22020-04-27 18:20:16 +01002#
3# SPDX-License-Identifier: Apache-2.0
4#
5# Licensed under the Apache License, Version 2.0 (the License); you may
6# not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an AS IS BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
Tim Hall79d07d22020-04-27 18:20:16 +010016# Description:
17# Packs a subgraph with Neural Network Operations into Passes. Each Pass has one or more Operations.
Diego Russoea6111a2020-04-14 18:41:58 +010018import collections
Diego Russoe8a10452020-04-21 17:39:10 +010019import enum
Diego Russoea6111a2020-04-14 18:41:58 +010020
Tim Halle6ccd872020-11-09 16:46:37 +000021from .debug_database import DebugDatabase
Diego Russoe8a10452020-04-21 17:39:10 +010022from .nn_graph import Pass
23from .nn_graph import PassPlacement
24from .operation import NpuBlockType
Louis Verhaardaee5d752020-09-30 09:01:52 +020025from .operation import Op
Fredrik Svedbergd9c2c422020-12-01 16:33:45 +010026from .operation_util import create_avgpool_nop
Diego Russoea6111a2020-04-14 18:41:58 +010027from .tensor import TensorPurpose
Tim Hall79d07d22020-04-27 18:20:16 +010028
29
class PassFlags(enum.Flag):
    """Bit flags describing the composition and placement of a Pass.

    Combined while packing operations into passes; `test_sequence` uses
    them to decide which operation kinds may be fused together.
    """

    Empty = 0
    Main = 1 << 0
    Post = 1 << 1
    Mac = 1 << 2
    ElementWise = 1 << 3
    Npu = 1 << 4
    Cpu = 1 << 5
    StartupInit = 1 << 6
    MemoryOnly = 1 << 7
    PostFusingLimited = 1 << 8
Tim Hall79d07d22020-04-27 18:20:16 +010041
42
# Operations executed on the NPU MAC (multiply-accumulate) unit; any one of
# these can act as the "main" operation of a pass (see test_sequence below).
mac_main_ops = set(
    (
        # convolutions
        Op.Conv2DBias,
        Op.Conv2D,
        Op.QuantizedConv2D,
        Op.Conv2DBackpropInputSwitchedBias,
        # depth-wise convolutions
        Op.DepthwiseConv2DBias,
        # FC layers
        Op.QuantizedMatMul,
        Op.MatMul,
        Op.FullyConnected,
        # RNN/LSTM/GRU
        Op.BlockLSTM,
        # pooling
        Op.QuantizedMaxPool,
        Op.QuantizedAvgPool,
        Op.AvgPool,
        Op.MaxPool,
        Op.ReduceSum,
    )
    # resize ops use pooling operations unless explicitly converted to other operations prior to pass packing
) | Op.op_set(Op.is_resize_op)
Tim Hall79d07d22020-04-27 18:20:16 +010067
# Element-wise operations taking two feature-map inputs (IFM and IFM2).
binary_elem_wise_main_ops = Op.op_set(Op.is_binary_elementwise_op)

# Element-wise operations taking a single feature-map input.
unary_elem_wise_main_ops = Op.op_set(Op.is_unary_elementwise_op)

# All element-wise operations that can be the main op of a pass.
elem_wise_main_ops = binary_elem_wise_main_ops | unary_elem_wise_main_ops

# ReLU-family activations; these may be fused onto a main op as "post" ops.
activation_ops = Op.op_set(Op.is_relu_op)
npu_post_ops = activation_ops

npu_post_fuse_limited_ops = set(
    # Set of post operators that should not be fused with main/elementwise ops
    (Op.Sigmoid, Op.Tanh, Op.Quantize)
)

# Operations that run on the element-wise unit (used to mark a whole pass as
# element-wise when every op in it belongs to this set).
elem_wise_ops = elem_wise_main_ops | activation_ops | set((Op.Sigmoid, Op.Tanh))


# Operations that must fall back to the CPU.
quantization_ops = set((Op.Dequantize, Op.Max, Op.Min))
cpu_ops = set((Op.Softmax, Op.LRN, Op.Shape, Op.Pad, Op.AddN)) | quantization_ops

# Operations handled once at graph start-up (constants and graph inputs).
startup_init_ops = set((Op.Const, Op.Placeholder, Op.SubgraphInput))
# Operations that only rearrange memory and move no data through a compute unit.
memory_only_ops = set(
    (
        Op.Squeeze,
        Op.Reshape,
        Op.QuantizedReshape,
        Op.ExpandDims,
    )
)
Tim Hall79d07d22020-04-27 18:20:16 +010097
98
# Packing rules, tried in order for each operation during build_pass().
# Each entry is a 4-tuple:
#   (ops_set,                  set of op types the rule applies to (None = catch-all)
#    incompatible_pack_flags,  if any of these flags are already set on the
#                              pass being built, the op cannot join it
#    flags_to_set,             flags OR-ed onto the pass when the op joins
#    flags_to_clear)           flags cleared from the pass when the op joins
# The first matching, compatible rule wins; rule order therefore encodes
# fusing priority (post ops first, then main ops, then fallbacks).
test_sequence = [
    (
        # ops_set
        npu_post_ops,
        # incompatible_pack_flags
        PassFlags.Cpu | PassFlags.MemoryOnly | PassFlags.Main,
        # flags_to_set
        PassFlags.Npu | PassFlags.Post,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        npu_post_fuse_limited_ops,
        # incompatible_pack_flags
        PassFlags.Cpu | PassFlags.MemoryOnly | PassFlags.Main | PassFlags.PostFusingLimited,
        # flags_to_set
        PassFlags.Npu | PassFlags.PostFusingLimited,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        mac_main_ops,
        # incompatible_pack_flags
        PassFlags.Cpu | PassFlags.MemoryOnly | PassFlags.ElementWise | PassFlags.Main | PassFlags.PostFusingLimited,
        # flags_to_set
        PassFlags.Npu | PassFlags.Mac | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        elem_wise_main_ops,
        # incompatible_pack_flags
        PassFlags.Cpu | PassFlags.MemoryOnly | PassFlags.Mac | PassFlags.Main | PassFlags.PostFusingLimited,
        # flags_to_set
        PassFlags.Npu | PassFlags.ElementWise | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        startup_init_ops,
        # incompatible_pack_flags
        PassFlags.Npu | PassFlags.Cpu | PassFlags.MemoryOnly,
        # flags_to_set
        PassFlags.StartupInit | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        memory_only_ops,
        # incompatible_pack_flags
        PassFlags.Npu | PassFlags.Cpu,
        # flags_to_set
        PassFlags.MemoryOnly | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        cpu_ops,
        # incompatible_pack_flags
        PassFlags.Npu | PassFlags.MemoryOnly | PassFlags.Main,
        # flags_to_set
        PassFlags.Cpu | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (  # This last one is a fallback for unrecognised operations
        # ops_set
        None,
        # incompatible_pack_flags
        PassFlags.Npu | PassFlags.MemoryOnly | PassFlags.Main,
        # flags_to_set
        PassFlags.Cpu | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
]

# Some sanity checking: a rule must never both set and clear the same flag.
for (operation_set, incompatible_pack_flags, flags_to_set, flags_to_clear) in test_sequence:
    assert not flags_to_clear & flags_to_set
Tim Hall79d07d22020-04-27 18:20:16 +0100186
def pack_into_passes(nng, arch, verbose_packing=False):
    """Pack each subgraph's operations into Passes and store them on the subgraph.

    Traverses every subgraph in `nng` backwards from its output tensors,
    greedily fusing compatible operations (per `test_sequence`) into Pass
    objects, then orders the resulting pass list to reduce CPU/NPU context
    switches. Sets `sg.passes` for each subgraph and returns `nng`.

    :param nng: neural network graph; `nng.subgraphs` is iterated and mutated
    :param arch: architecture description (unused in this function body —
                 NOTE(review): kept for interface compatibility, confirm)
    :param verbose_packing: if True, call nng.print_passes() at the end
    :return: the same `nng` object, with passes attached to its subgraphs
    """

    def visit_op(op, ignored):
        # Reference-count visits to `op`; build a pass for it once all of its
        # live outputs have been visited. `ignored` is unused.
        visit_op_refcount[op] += 1

        if visit_op_refcount[op] == 1:  # First-time visit, go and fix up unused output tensors
            for tens in op.outputs:
                if len(tens.consumers()) == 0:
                    # Pre-count dead outputs so the threshold below can be met.
                    visit_op_refcount[op] += 1

        assert visit_op_refcount[op] <= len(op.outputs)
        if visit_op_refcount[op] == len(op.outputs):

            if op.type in startup_init_ops:
                # Startup ops are collected and packed into one pass later.
                startup_list.append(op)
            else:
                ofm_tensor = op.ofm
                if ofm_tensor is None:
                    ofm_tensor = op.outputs[0]
                # Shapes are only tracked for ops that will run on the NPU.
                ofm_shape = op.ofm_shapes[0] if op.run_on_npu else None

                build_pass((op,), ofm_tensor, ofm_shape)

    def build_pass(start_ops_to_process, ofm_tensor=None, ofm_shape=None):
        """Build one Pass starting from `start_ops_to_process`.

        Walks backwards through producers, fusing every operation that a rule
        in `test_sequence` allows into the same pass. Registers the pass in
        `reverse_pass_list` (passes are created in reverse execution order)
        and recursively triggers packing of producer ops via visit_tensor().
        Returns the created Pass.
        """
        reverse_ops_list = []  # fused ops, in reverse execution order
        curr_flags = PassFlags.Empty
        npu_block_type = NpuBlockType.Default

        # NOTE(review): reverse_intermediates is never appended to in this
        # function, so ps.intermediates is always empty here — confirm intent.
        reverse_intermediates = []
        input_set = set()  # tensors produced outside this pass
        ifm_tensor = None
        primary_op = None
        ifm_shapes = None

        to_process = collections.deque()
        for start_op in start_ops_to_process:
            to_process.append((start_op, None))

        while to_process:
            curr_op, tens = to_process.popleft()

            if curr_op in reverse_ops_list:
                continue

            # Find the first packing rule that matches this op type and is
            # compatible with the flags accumulated on the pass so far.
            for operation_set, incompatible_pack_flags, flags_to_set, flags_to_clear in test_sequence:
                if operation_set is None or curr_op.type in operation_set:
                    if not (curr_flags & incompatible_pack_flags):
                        if flags_to_set & PassFlags.Npu:
                            # NPU rule, but op must fall back to CPU: try the
                            # next (CPU/fallback) rule instead.
                            if not curr_op.run_on_npu:
                                continue

                        reverse_ops_list.append(curr_op)
                        new_block_type = curr_op.type.npu_block_type
                        if new_block_type != NpuBlockType.Default:
                            assert npu_block_type == NpuBlockType.Default
                            npu_block_type = new_block_type  # Only one major block type per pass
                            assert primary_op is None
                            primary_op = curr_op

                        curr_flags &= ~flags_to_clear
                        curr_flags |= flags_to_set

                        if flags_to_set & PassFlags.Npu:
                            if flags_to_set & (
                                PassFlags.Mac | PassFlags.ElementWise | PassFlags.Post | PassFlags.PostFusingLimited
                            ):
                                # Track the IFM of the deepest NPU op fused so
                                # far; it becomes the pass IFM for non-binary ops.
                                assert len(curr_op.inputs) >= 1
                                ifm_tensor = curr_op.ifm
                                ifm_shapes = curr_op.ifm_shapes.copy()
                                assert ifm_tensor is not None, "IFM missing in {}".format(curr_op)
                                assert ifm_tensor.purpose == TensorPurpose.FeatureMap

                        if operation_set is None:
                            print("Warning:", curr_op.type, "operation is unknown or unsupported, placing on CPU")

                        # Try to pull each producer into this pass too;
                        # otherwise record its output as a pass input.
                        for inp in reversed(curr_op.inputs):
                            if inp is None:
                                continue
                            if can_pack(inp, curr_op):
                                to_process.append((inp.ops[0], inp))
                            else:
                                input_set.add(inp)

                        break

            else:
                # This operation is not compatible with already packed operations, just register the tensor as an input
                assert tens is not None
                input_set.add(tens)

        if curr_flags & PassFlags.Npu and not curr_flags & (PassFlags.ElementWise | PassFlags.Mac):
            # Make the choice that if we don't have a mac operation, the ambidextrous operations go on the
            # element wise unit
            curr_flags |= PassFlags.ElementWise

        # A pass is element-wise only if every fused op is in elem_wise_ops.
        is_element_wise = True
        for op in reverse_ops_list:
            if op.type not in elem_wise_ops and op.type:
                is_element_wise = False
                break

        # Exactly one placement flag must have been set by the rules.
        placement = PassPlacement.Unknown
        if curr_flags & PassFlags.Npu:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.Npu
        if curr_flags & PassFlags.Cpu:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.Cpu
        if curr_flags & PassFlags.MemoryOnly:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.MemoryOnly
        if curr_flags & PassFlags.StartupInit:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.StartupInit
        assert placement != PassPlacement.Unknown

        # Restore execution order (ops were collected output-first).
        ops_list = list(reversed(reverse_ops_list))
        intermediates = list(reversed(reverse_intermediates))

        if primary_op is None:
            # Pass contains only post ops: synthesize a 1x1 AvgPool primary op.
            primary_op = create_primary_op(ops_list)
            if primary_op is not None:
                visit_tensor_refcount[primary_op.inputs[0]] += 1
                npu_block_type = primary_op.type.npu_block_type
                for input_tens in primary_op.inputs:
                    if input_tens not in input_set:
                        input_set.add(input_tens)

        ordered_input_list = []
        # Keep LUT-s in a separate list and add as inputs at the end
        # to avoid that they would accidentally be assigned as ifm or ifm2
        lut_list = []
        input_refcounts = collections.defaultdict(int)
        input_ops_list = ops_list.copy()

        # Check primary_op first so its inputs lead the ordered input list.
        if primary_op is not None:
            for inp in primary_op.inputs:
                if inp is None:
                    continue
                add_input_list(inp, input_set, input_refcounts, lut_list, ordered_input_list)
            input_ops_list.remove(primary_op)

        # Check rest of the list
        for op in input_ops_list:
            for inp in op.inputs:
                add_input_list(inp, input_set, input_refcounts, lut_list, ordered_input_list)

        name = ops_list[0].name
        ps = Pass(name, placement, is_element_wise, npu_block_type)
        ps.ops = ops_list
        ps.primary_op = primary_op
        ps.inputs = ordered_input_list
        ps.intermediates = intermediates
        ps.outputs = list(ops_list[-1].outputs)

        # ElementWise operation, 2 IFMs
        if ps.primary_op and ps.primary_op.type in binary_elem_wise_main_ops:
            ps.ifm_tensor = ps.inputs[0]
            ps.ifm2_tensor = ps.inputs[-1]

            if len(ps.inputs) > 2:
                ps.ifm_tensor = ps.inputs[-2]

            # Get the corresponding ifm_shapes
            for op in input_ops_list + [primary_op]:
                if op.run_on_npu:
                    if ps.ifm_tensor == op.ifm:
                        ps.ifm_shapes.append(op.ifm_shapes[0])
                    elif ps.ifm_tensor == op.ifm2:
                        ps.ifm_shapes.append(op.ifm_shapes[1])

                    if ps.ifm2_tensor == op.ifm:
                        ps.ifm_shapes.append(op.ifm_shapes[0])
                    elif ps.ifm2_tensor == op.ifm2:
                        ps.ifm_shapes.append(op.ifm_shapes[1])
        else:
            ps.ifm_tensor = ifm_tensor
            ps.ifm2_tensor = None
            if ps.primary_op is not None and ps.primary_op.run_on_npu:
                ps.ifm_shapes.append(ifm_shapes[0])

        ps.ofm_tensor = ofm_tensor
        ps.ofm_shapes.append(ofm_shape)

        assert ps.placement != PassPlacement.Npu or ps.ofm_tensor is not None
        ps.weight_tensor = ps.get_primary_op_ifm_weights()[1]
        ps.scale_tensor = ps.get_primary_op_ifm_weights_biases_ofm()[2]
        ps.lut_tensor = ps.get_primary_op_lut()
        ps.inputs.extend(lut_list)

        for op in ps.ops:
            op.scheduled_pass = ps

        reverse_pass_list.append(ps)

        # Continue the backwards traversal through this pass's inputs, once
        # per recorded consumption of each input tensor.
        for inp, refcount in input_refcounts.items():
            for _ in range(refcount):
                visit_tensor(inp)

        return ps

    def visit_tensor(tens):
        # Visit a tensor's producers only after every consumer has been seen,
        # so each op is packed exactly once and after all ops depending on it.
        visit_tensor_refcount[tens] += 1
        assert visit_tensor_refcount[tens] <= len(tens.consumers())
        if visit_tensor_refcount[tens] == len(tens.consumers()):
            for op in reversed(tens.ops):
                visit_op(op, tens)

    def create_primary_op(op_list):
        # If the pass holds only NPU post/limited-fuse ops, insert a no-op
        # 1x1 AvgPool in front of them to serve as the pass's primary op.
        if any(op.type in (npu_post_ops | npu_post_fuse_limited_ops) and op.run_on_npu for op in op_list):
            # Configure a 1x1 AvgPool and attach the op onto it
            op = op_list[0]
            inp = op.inputs[0]
            avgpool_op = create_avgpool_nop(op.name + "_avgpool")
            avgpool_op.add_input_tensor(inp)
            avgpool_out = inp.clone("_avgpooled")
            avgpool_out.consumer_list.append(op)
            avgpool_op.set_output_tensor(avgpool_out)
            # Mirror the shape/read metadata of the op it is inserted before.
            avgpool_op.ifm_shapes = op.ifm_shapes.copy()
            avgpool_op.ofm_shapes = op.ofm_shapes.copy()
            avgpool_op.read_offsets = op.read_offsets.copy()
            avgpool_op.read_shapes = op.read_shapes.copy()

            # Rewire: op now consumes the avgpool output instead of inp.
            op.inputs[0] = avgpool_out
            op_list.insert(0, avgpool_op)

            DebugDatabase.add_optimised(op, avgpool_op)
            return avgpool_op

        return None

    def can_pack(inp, curr_op):
        # Return True if the (sole) producer of `inp` may be fused into the
        # same pass as `curr_op`.
        if len(inp.ops) == 1:
            next_op = inp.ops[0]
            # Producer's outputs must all feed curr_op exclusively.
            for outp in next_op.outputs:
                consumers = outp.consumers()
                if len(consumers) > 1 or (len(consumers) == 1 and consumers[0] != curr_op):
                    return False

            # There cannot be any reshaping between next_op ofm and corresponding curr_op ifm
            if len(curr_op.ifm_shapes) != 0 and len(next_op.ofm_shapes) != 0:
                if inp == curr_op.ifm and next_op.ofm_shapes[0] != curr_op.ifm_shapes[0]:
                    return False
                elif (
                    curr_op.ifm2 is not None and inp == curr_op.ifm2 and next_op.ofm_shapes[0] != curr_op.ifm_shapes[1]
                ):
                    return False
        else:
            return False

        return True

    def add_input_list(inp_to_add, inp_set, inp_refcnts, lut_list, ordered_inp_list):
        # Append `inp_to_add` to the ordered input list (or the LUT list) the
        # first time it is seen, and count every reference to it.
        if inp_to_add in inp_set:
            if inp_refcnts[inp_to_add] == 0:
                if inp_to_add.purpose == TensorPurpose.LUT:
                    lut_list.append(inp_to_add)
                else:
                    ordered_inp_list.append(inp_to_add)
            inp_refcnts[inp_to_add] += 1

    for sg in nng.subgraphs:
        # Per-subgraph closure state shared by the nested helpers above.
        reverse_pass_list = []
        visit_op_refcount = collections.defaultdict(int)
        visit_tensor_refcount = collections.defaultdict(int)

        startup_list = []

        # Backwards traversal: start from the subgraph outputs.
        for tens in sg.output_tensors:
            visit_tensor(tens)

        if startup_list:
            startup_ps = build_pass(startup_list)
            startup_ps.outputs = [op.outputs[0] for op in startup_list]  # Need to fixup the outputs
            startup_ps.name = "startup_weight_initialisation"

        # Graphs with both CPU and NPU ops might not have an optimal order in
        # the pass list due to how the graph is traversed (depth first search).
        # This can result in more context switching between CPU and NPU.
        # Try to optmize this by moving/grouping CPU ops where that is possible.
        # Criteria for CPU pass to be moved:
        #
        # 1) CPU passes that only depends on sg.input_tensor can be
        #    moved to the top of the list.
        #
        # 2) A CPU pass X is allowed to be grouped together with CPU pass Y
        #    if there is no NPU pass between pass X and pass Y that depends
        #    on output from pass X or a MemoryOnly pass.
        #
        # Criteria 2 will try to move as many CPU passes towards the bottom of
        # the list.

        pass_list_top = []
        pass_list = []

        # Filter out early passes from the rest
        for ps in list(reversed(reverse_pass_list)):
            if startup_ps == ps:
                # startup pass belongs in the top
                pass_list_top.insert(0, ps)
                continue

            if (
                ps.placement == PassPlacement.Cpu
                and ps.ops[0].ifm in sg.input_tensors
                and (ps.ops[0].ifm2 in sg.input_tensors or ps.ops[0].ifm2 is None)
            ):
                # This CPU pass only depends on sg.input_tensors
                pass_list_top.append(ps)
            else:
                # Add pass to the list that will be sorted in the next step
                pass_list.append(ps)

        # Sort the rest of the list based on critera 2.
        # Search from bottom of list and when a CPU pass is found
        # search forward in the list and see if it is possible to join another CPU pass.
        last_idx = len(pass_list) - 1
        for cpu_ps in reversed(pass_list):
            if cpu_ps.placement != PassPlacement.Cpu:
                continue
            # CPU pass found, search forward and move pass if possible
            idx = pass_list.index(cpu_ps)
            for next_ps in pass_list[idx + 1 :]:
                if next_ps.placement == PassPlacement.Cpu:
                    # It is possible to move the CPU pass
                    pass_list.remove(cpu_ps)
                    insert_index = pass_list.index(next_ps)
                    pass_list.insert(insert_index, cpu_ps)
                    break

                if (
                    cpu_ps.ops[0].ofm in [next_ps.ops[0].ifm, next_ps.ops[0].ifm2]
                    or next_ps.placement == PassPlacement.MemoryOnly
                ):
                    # Not possible to move
                    break

                if pass_list.index(next_ps) == last_idx:
                    # Last element, ok to move the CPU pass
                    pass_list.remove(cpu_ps)
                    pass_list.append(cpu_ps)
                    break

        pass_list_top.extend(pass_list)

        sg.passes = pass_list_top
        sg.build_pass_links()

    if verbose_packing:
        nng.print_passes()

    return nng