# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Description:
# Packs a subgraph with Neural Network Operations into Passes. Each Pass has one or more Operations.

import collections
import enum

from .nn_graph import Pass, PassPlacement
from .tensor import TensorPurpose
from .operation import Operation, NpuBlockType


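# Flags used to tag each operation during packing; they are combined as a bitmask to track what
# the current Pass already contains. Note that some bit positions are left unused.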
class PassFlags(enum.Flag):
    Empty = 0
    Pre = 1
    Main = 2
    Post = 4
    Mac = 8
    Dma = 32
    ElementWise = 256
    Npu = 512
    Cpu = 1024
    StartupInit = 2048
    MemoryOnly = 4096
    PostFusingLimited = 8192


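# Operator type sets used to classify operations into pass categories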
npu_pre_ops = set(("QuantizedResizeBilinear", "SplitSliceRead",))

mac_main_ops = set(
    (
        # convolutions
        "Conv2DBiasAct",
        "Conv2D",
        "QuantizedConv2D",
        "Conv2DBackpropInputSwitched",
        # depth-wise convolutions
        "DepthwiseConv2dBiasAct",
        "DepthwiseConv2dNative",
        "QuantizedDepthwiseConv2D",
        # FC layers
        "QuantizedMatMul",
        "MatMul",
        "FullyConnectedAct",
        # RNN/LSTM/GRU
        "BlockLSTM",
        # pooling
        "QuantizedMaxPool",
        "QuantizedAvgPool",
        "AvgPool",
        "MaxPool",
        "AvgPoolAct",
        "MaxPoolAct",
    )
)

binary_elem_wise_main_ops = set(
    (
        # binary element-wise
        "AddAct",
        "MulAct",
        "SubAct",
        "QuantizedAdd",
        "QuantizedSub",
        "QuantizedMul",
        "Mul",
        "Add",
        "Sub",
        "Minimum",
        "Maximum",
    )
)

unary_elem_wise_main_ops = set(("LeakyRelu", "Abs"))  # Unary element-wise operations

elem_wise_main_ops = binary_elem_wise_main_ops | unary_elem_wise_main_ops

activation_ops = set(("QuantizedRelu", "QuantizedRelu1", "QuantizedRelu6", "Relu", "Relu6", "ReluN1To1"))
npu_post_ops = activation_ops | set(
    # Bias-add operations: Get rid of these once we have rewrites from Conv2D + BiasAdd + Activation to Conv2DBiasAct.
    ("Mul", "Add", "QuantizedBiasAdd", "Requantize", "QuantizedBatchNorm", "BiasAdd", "FusedBatchNorm")
)

npu_post_fuse_limited_ops = set(
    # Set of post operators that should not be fused with main/elementwise ops
    ("ConcatSliceWrite", "Sigmoid", "Tanh")
)

elem_wise_ops = elem_wise_main_ops | activation_ops | set(("Sigmoid", "Tanh"))


quantization_ops = set(("Dequantize", "QuantizeV2", "Max", "Min"))
cpu_ops = set(("Softmax", "QuantizedSoftmax", "LRN", "Shape", "QuantizedPad", "Pad", "AddN")) | quantization_ops

npu_dma_ops = set(("DMA",))
startup_init_ops = set(("Const", "VariableV2", "Placeholder", "SubgraphInput"))
memory_only_ops = set(("Squeeze", "Reshape", "QuantizedReshape", "ExpandDims",))


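# Packing rules, tried in order for each operation. Each tuple is
# (ops_set, incompatible_pack_flags, flags_to_set, flags_to_clear): an operation whose type is in
# ops_set can join the current pass only if none of the incompatible flags are already set; the
# first matching entry wins. The final entry (ops_set None) is a CPU fallback for unknown operations.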
test_sequence = [
    (
        # ops_set
        npu_post_ops,
        # incompatible_pack_flags
        PassFlags.Cpu | PassFlags.MemoryOnly | PassFlags.Pre | PassFlags.Main,
        # flags_to_set
        PassFlags.Npu | PassFlags.Post,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        npu_post_fuse_limited_ops,
        # incompatible_pack_flags
        PassFlags.Cpu | PassFlags.MemoryOnly | PassFlags.Pre | PassFlags.Main,
        # flags_to_set
        PassFlags.Npu | PassFlags.PostFusingLimited,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        mac_main_ops,
        # incompatible_pack_flags
        PassFlags.Cpu
        | PassFlags.MemoryOnly
        | PassFlags.ElementWise
        | PassFlags.Pre
        | PassFlags.Main
        | PassFlags.PostFusingLimited,
        # flags_to_set
        PassFlags.Npu | PassFlags.Mac | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        elem_wise_main_ops,
        # incompatible_pack_flags
        PassFlags.Cpu
        | PassFlags.MemoryOnly
        | PassFlags.Mac
        | PassFlags.Pre
        | PassFlags.Main
        | PassFlags.PostFusingLimited,
        # flags_to_set
        PassFlags.Npu | PassFlags.ElementWise | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        npu_pre_ops,
        # incompatible_pack_flags
        PassFlags.Cpu | PassFlags.MemoryOnly,
        # flags_to_set
        PassFlags.Npu | PassFlags.Mac | PassFlags.Pre | PassFlags.ElementWise,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        npu_dma_ops,
        # incompatible_pack_flags
        PassFlags.Cpu | PassFlags.MemoryOnly,
        # flags_to_set
        PassFlags.Npu | PassFlags.Dma,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        startup_init_ops,
        # incompatible_pack_flags
        PassFlags.Npu | PassFlags.Cpu | PassFlags.MemoryOnly,
        # flags_to_set
        PassFlags.StartupInit | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        memory_only_ops,
        # incompatible_pack_flags
        PassFlags.Npu | PassFlags.Cpu,
        # flags_to_set
        PassFlags.MemoryOnly | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        cpu_ops,
        # incompatible_pack_flags
        PassFlags.Npu | PassFlags.MemoryOnly | PassFlags.Main,
        # flags_to_set
        PassFlags.Cpu | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (  # This last one is a fallback for unrecognised operations
        # ops_set
        None,
        # incompatible_pack_flags
        PassFlags.Npu | PassFlags.MemoryOnly | PassFlags.Main,
        # flags_to_set
        PassFlags.Cpu | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
]

# Some sanity checking
for (operation_set, incompatible_pack_flags, flags_to_set, flags_to_clear) in test_sequence:
    assert not flags_to_clear & flags_to_set

    if operation_set is not None:
        for op in operation_set:
            assert len(op) > 1  # Guard against a bare string ops_set, which would decompose into single characters


def pack_into_passes(nng, arch, verbose_packing=False):
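    # Walks each subgraph backwards from its output tensors and groups operations into Passes
    # according to test_sequence. Returns the (modified) nng.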
    def visit_op(op, ignored):
        visit_op_refcount[op] += 1

        if visit_op_refcount[op] == 1:  # First-time visit, go and fix up unused output tensors
            for tens in op.outputs:
                if len(tens.consumers()) == 0:
                    visit_op_refcount[op] += 1

        assert visit_op_refcount[op] <= len(op.outputs)
        if visit_op_refcount[op] == len(op.outputs):

            if op.type in startup_init_ops:
                startup_list.append(op)
            else:
                _, _, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
                if ofm_tensor is None:
                    ofm_tensor = op.outputs[0]
                build_pass((op,), ofm_tensor)

    def build_pass(start_ops_to_process, ofm_tensor=None):
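        # Grows a single Pass backwards from the given start ops, pulling in producers whose
        # outputs are consumed only by this pass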
        reverse_ops_list = []
        curr_flags = PassFlags.Empty
        npu_block_type = NpuBlockType.Default

        reverse_intermediates = []
        input_set = set()
        ifm_tensor = None
        primary_op = None

        to_process = collections.deque()
        for start_op in start_ops_to_process:
            to_process.append((start_op, None))

        while to_process:
            curr_op, tens = to_process.popleft()

            if curr_op in reverse_ops_list:
                continue

            for operation_set, incompatible_pack_flags, flags_to_set, flags_to_clear in test_sequence:
                if operation_set is None or curr_op.type in operation_set:
                    if not (curr_flags & incompatible_pack_flags):
                        if flags_to_set & PassFlags.Npu:
                            if not curr_op.run_on_npu:
                                continue

                        reverse_ops_list.append(curr_op)
                        new_block_type = curr_op.attrs.get("npu_block_type", NpuBlockType.Default)
                        if new_block_type != NpuBlockType.Default:
                            assert npu_block_type == NpuBlockType.Default
                            npu_block_type = new_block_type  # Only one major block type per pass
                            assert primary_op is None
                            primary_op = curr_op

                        curr_flags &= ~flags_to_clear
                        curr_flags |= flags_to_set

                        if flags_to_set & PassFlags.Npu:
                            if flags_to_set & (
                                PassFlags.Mac | PassFlags.ElementWise | PassFlags.Post | PassFlags.PostFusingLimited
                            ):
                                assert len(curr_op.inputs) >= 1
                                if curr_op.type == "BlockLSTM":
                                    ifm_tensor = curr_op.inputs[3]
                                else:
                                    ifm_tensor = curr_op.inputs[0]
                                assert ifm_tensor.purpose == TensorPurpose.FeatureMap

                        if flags_to_set & PassFlags.Dma:
                            # DMAs are special - output buffers need to be preserved as intermediates
                            # if the pass consumes the results
                            if tens is not None:
                                reverse_intermediates.append(tens)

                        if operation_set is None:
                            print("Warning:", curr_op.type, "operation is unknown or unsupported, placing on CPU")

                        for inp in curr_op.inputs:
                            can_pack = True
                            if len(inp.ops) == 1:
                                next_op = inp.ops[0]
                                for outp in next_op.outputs:
                                    consumers = outp.consumers()
                                    if len(consumers) > 1 or (len(consumers) == 1 and consumers[0] != curr_op):
                                        can_pack = False
                                        break
                            else:
                                can_pack = False

                            if can_pack:
                                to_process.append((next_op, inp))
                            else:
                                assert inp is not None
                                input_set.add(inp)

                        break

            else:
                # This operation is not compatible with the already packed operations; just register
                # the tensor as an input
                assert tens is not None
                input_set.add(tens)

        if curr_flags & PassFlags.Npu and not curr_flags & (PassFlags.ElementWise | PassFlags.Mac):
            # If the pass has no MAC operation, run the ambidextrous operations on the element-wise unit
            curr_flags |= PassFlags.ElementWise

        is_element_wise = True
        for op in reverse_ops_list:
            if op.type not in elem_wise_ops and op.type not in npu_dma_ops:
                is_element_wise = False
                break

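        # Derive the pass placement from the flags; the asserts guarantee exactly one placement applies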
        placement = PassPlacement.Unknown
        if curr_flags & PassFlags.Npu:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.Npu
        if curr_flags & PassFlags.Cpu:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.Cpu
        if curr_flags & PassFlags.MemoryOnly:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.MemoryOnly
        if curr_flags & PassFlags.StartupInit:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.StartupInit
        assert placement != PassPlacement.Unknown

        ops_list = list(reversed(reverse_ops_list))
        intermediates = list(reversed(reverse_intermediates))

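        # No operation claimed the primary role; create_primary_op may synthesise a 1x1 AvgPool to fill it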
        if primary_op is None:
            primary_op = create_primary_op(ops_list)
            if primary_op is not None:
                visit_tensor_refcount[primary_op.inputs[0]] += 1
                npu_block_type = primary_op.attrs["npu_block_type"]
                for input_tens in primary_op.inputs:
                    if input_tens not in input_set:
                        input_set.add(input_tens)

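        # Order the pass inputs by first use and count how many times each one is consumed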
        ordered_input_list = []
        input_refcounts = collections.defaultdict(int)
        for op in ops_list:
            for inp in op.inputs:
                if inp in input_set:
                    if input_refcounts[inp] == 0:
                        ordered_input_list.append(inp)
                    input_refcounts[inp] += 1

        name = ops_list[0].name
        non_dma_ops = [op for op in ops_list if op.type != "DMA"]
        if non_dma_ops:
            name = non_dma_ops[0].name
        ps = Pass(name, placement, is_element_wise, npu_block_type)
        ps.ops = ops_list
        ps.primary_op = primary_op
        ps.inputs = ordered_input_list
        ps.intermediates = intermediates
        ps.outputs = list(ops_list[-1].outputs)
        ps.ifm_tensor = ifm_tensor

        # Element-wise operations can have a second IFM
        if ps.primary_op and ps.primary_op.type in binary_elem_wise_main_ops:
            ps.ifm_tensor = ps.inputs[0]

            if len(ps.inputs) == 1:
                # Only one input; IFM and IFM2 are the same tensor
                ps.ifm2_tensor = ps.inputs[0]
            else:
                ps.ifm2_tensor = ps.inputs[1]
        else:
            ps.ifm_tensor = ifm_tensor
            ps.ifm2_tensor = None

        ps.ofm_tensor = ofm_tensor
        assert ps.placement != PassPlacement.Npu or ps.ofm_tensor is not None
        ps.weight_tensor = ps.get_primary_op_ifm_weights()[1]
        ps.scale_tensor = ps.get_primary_op_ifm_weights_biases_ofm()[2]

        for op in ps.ops:
            op.scheduled_pass = ps

        reverse_pass_list.append(ps)

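        # Visit each input once per consuming operation in this pass, so that producers get packed
        # once all of their consumers have been accounted for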
        for inp, refcount in input_refcounts.items():
            for _ in range(refcount):
                visit_tensor(inp)

        return ps

    def visit_tensor(tens):
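        # Counts visits to the tensor; once every consumer has visited it, its producers are processed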
        visit_tensor_refcount[tens] += 1
        assert visit_tensor_refcount[tens] <= len(tens.consumers())
        if visit_tensor_refcount[tens] == len(tens.consumers()):
            for op in reversed(tens.ops):
                visit_op(op, tens)

    def create_primary_op(ops_list):
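        # Called when no main operation claimed the pass: if the pass holds pre/post operations,
        # synthesise a 1x1 AvgPool to act as the primary op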
        if any(op.type in (npu_pre_ops | npu_post_ops | npu_post_fuse_limited_ops) for op in ops_list):
            # Configure a 1x1 AvgPool and attach the op onto it
            op = ops_list[0]
            inp = op.inputs[0]
            avgpool_name = op.name + "_avgpool"
            avgpool_op = Operation("AvgPool", avgpool_name)
            avgpool_op.inputs = [inp]
            avgpool_op.inputs[0].consumer_list.append(avgpool_op)
            avgpool_op.attrs["padding"] = b"VALID"
            avgpool_op.attrs["npu_block_type"] = NpuBlockType.Pooling
            avgpool_op.attrs["stride_w"] = 1
            avgpool_op.attrs["stride_h"] = 1
            avgpool_op.attrs["filter_width"] = 1
            avgpool_op.attrs["filter_height"] = 1
            avgpool_op.attrs["strides"] = [1, 1, 1, 1]
            avgpool_op.attrs["ksize"] = [1, 1, 1, 1]
            avgpool_op.attrs["skirt"] = [0, 0, 0, 0]
            avgpool_op.attrs["explicit_padding"] = [0, 0, 0, 0]
            avgpool_out = inp.clone("_avgpooled")
            avgpool_out.consumer_list.append(op)
            avgpool_out.ops = [avgpool_op]
            avgpool_op.outputs = [avgpool_out]

            op.inputs[0] = avgpool_out
            ops_list.insert(0, avgpool_op)

            return avgpool_op

        return None
466
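    # Pack each subgraph independently, starting the backwards walk from its output tensors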
    for sg in nng.subgraphs:
        reverse_pass_list = []
        visit_op_refcount = collections.defaultdict(int)
        visit_tensor_refcount = collections.defaultdict(int)

        startup_list = []

        for tens in sg.output_tensors:
            visit_tensor(tens)

        if startup_list:
            startup_ps = build_pass(startup_list)
            startup_ps.outputs = [op.outputs[0] for op in startup_list]  # Need to fix up the outputs
            startup_ps.name = "startup_weight_initialisation"

        sg.passes = list(reversed(reverse_pass_list))
        sg.build_pass_links()

    if verbose_packing:
        nng.print_passes()

    return nng