blob: ee0d71287d4815e42dcf05b67be6faefde133fae [file] [log] [blame]
Tim Hall79d07d22020-04-27 18:20:16 +01001# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
2#
3# SPDX-License-Identifier: Apache-2.0
4#
5# Licensed under the Apache License, Version 2.0 (the License); you may
6# not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an AS IS BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
Tim Hall79d07d22020-04-27 18:20:16 +010016# Description:
17# Packs a subgraph with Neural Network Operations into Passes. Each Pass has one or more Operations.
Diego Russoea6111a2020-04-14 18:41:58 +010018import collections
Diego Russoe8a10452020-04-21 17:39:10 +010019import enum
Diego Russoea6111a2020-04-14 18:41:58 +010020
Tim Halle6ccd872020-11-09 16:46:37 +000021from .debug_database import DebugDatabase
Diego Russoe8a10452020-04-21 17:39:10 +010022from .nn_graph import Pass
23from .nn_graph import PassPlacement
24from .operation import NpuBlockType
Louis Verhaardaee5d752020-09-30 09:01:52 +020025from .operation import Op
Fredrik Svedbergd9c2c422020-12-01 16:33:45 +010026from .operation_util import create_avgpool_nop
Diego Russoea6111a2020-04-14 18:41:58 +010027from .tensor import TensorPurpose
Tim Hall79d07d22020-04-27 18:20:16 +010028
29
class PassFlags(enum.Flag):
    """Bitmask flags describing what has been packed into the pass being built.

    test_sequence (below) pairs these as (incompatible_pack_flags, flags_to_set)
    to decide whether the next operation may join the current pass.
    Values are explicit bit positions; some bits (16, 64, 128) are unused.
    """

    Empty = 0  # No flags set; also used as a "clear nothing" marker in test_sequence
    Pre = 1  # Pass contains an NPU pre-op (e.g. SplitSliceRead)
    Main = 2  # Pass contains a main op (MAC, elementwise, CPU, etc.)
    Post = 4  # Pass contains a fusable NPU post-op (activation)
    Mac = 8  # Pass contains a MAC-unit op (conv/FC/pool/...)
    Dma = 32  # Pass contains a DMA op
    ElementWise = 256  # Pass contains an elementwise-unit op
    Npu = 512  # Pass runs on the NPU
    Cpu = 1024  # Pass falls back to the CPU
    StartupInit = 2048  # Pass holds startup initialisation ops (Const/Placeholder/...)
    MemoryOnly = 4096  # Pass holds memory-only reshaping ops (Reshape/Squeeze/...)
    PostFusingLimited = 8192  # Post-op with restricted fusing (e.g. Sigmoid/Tanh/ConcatSliceWrite)
43
44
Louis Verhaardaee5d752020-09-30 09:01:52 +020045npu_pre_ops = set((Op.SplitSliceRead,))
Tim Hall79d07d22020-04-27 18:20:16 +010046
47mac_main_ops = set(
48 (
49 # convolutions
Louis Verhaardaee5d752020-09-30 09:01:52 +020050 Op.Conv2DBias,
51 Op.Conv2D,
52 Op.QuantizedConv2D,
53 Op.Conv2DBackpropInputSwitchedBias,
Tim Hall79d07d22020-04-27 18:20:16 +010054 # depth-wise convolutions
Louis Verhaardaee5d752020-09-30 09:01:52 +020055 Op.DepthwiseConv2DBias,
Tim Hall79d07d22020-04-27 18:20:16 +010056 # FC layers
Louis Verhaardaee5d752020-09-30 09:01:52 +020057 Op.QuantizedMatMul,
58 Op.MatMul,
59 Op.FullyConnected,
Tim Hall79d07d22020-04-27 18:20:16 +010060 # RNN/LSTM/GRU
Louis Verhaardaee5d752020-09-30 09:01:52 +020061 Op.BlockLSTM,
Tim Hall79d07d22020-04-27 18:20:16 +010062 # pooling
Louis Verhaardaee5d752020-09-30 09:01:52 +020063 Op.QuantizedMaxPool,
64 Op.QuantizedAvgPool,
65 Op.AvgPool,
66 Op.MaxPool,
67 Op.ReduceSum,
Dwight Lidman3ec04ac2020-04-30 11:54:48 +020068 # deconvolution
Louis Verhaardaee5d752020-09-30 09:01:52 +020069 Op.ResizeBilinear,
Tim Hall79d07d22020-04-27 18:20:16 +010070 )
71)
72
Louis Verhaardaee5d752020-09-30 09:01:52 +020073binary_elem_wise_main_ops = Op.op_set(Op.is_binary_elementwise_op)
Tim Hall79d07d22020-04-27 18:20:16 +010074
Michael McGeaghf3e3ad72020-12-02 12:39:03 +000075unary_elem_wise_main_ops = Op.op_set(Op.is_unary_elementwise_op)
Tim Hall79d07d22020-04-27 18:20:16 +010076
77elem_wise_main_ops = binary_elem_wise_main_ops | unary_elem_wise_main_ops
78
Louis Verhaardaee5d752020-09-30 09:01:52 +020079activation_ops = Op.op_set(Op.is_relu_op)
80npu_post_ops = activation_ops
Tim Hall79d07d22020-04-27 18:20:16 +010081
82npu_post_fuse_limited_ops = set(
83 # Set of post operators that should not be fused with main/elementwise ops
Louis Verhaardaee5d752020-09-30 09:01:52 +020084 (Op.ConcatSliceWrite, Op.Sigmoid, Op.Tanh, Op.Quantize)
Tim Hall79d07d22020-04-27 18:20:16 +010085)
86
Louis Verhaardaee5d752020-09-30 09:01:52 +020087elem_wise_ops = elem_wise_main_ops | activation_ops | set((Op.Sigmoid, Op.Tanh))
Tim Hall79d07d22020-04-27 18:20:16 +010088
89
Louis Verhaardaee5d752020-09-30 09:01:52 +020090quantization_ops = set((Op.Dequantize, Op.Max, Op.Min))
91cpu_ops = set((Op.Softmax, Op.LRN, Op.Shape, Op.Pad, Op.AddN)) | quantization_ops
Tim Hall79d07d22020-04-27 18:20:16 +010092
Louis Verhaardaee5d752020-09-30 09:01:52 +020093npu_dma_ops = set((Op.DMA,))
patrik.gustavsson10683622020-10-14 10:57:46 +000094startup_init_ops = set((Op.Const, Op.Placeholder, Op.SubgraphInput))
Louis Verhaardaee5d752020-09-30 09:01:52 +020095memory_only_ops = set((Op.Squeeze, Op.Reshape, Op.QuantizedReshape, Op.ExpandDims,))
Tim Hall79d07d22020-04-27 18:20:16 +010096
97
# Packing rule table, consulted in order for every operation considered for the
# current pass.  Each entry is a 4-tuple:
#   (ops_set, incompatible_pack_flags, flags_to_set, flags_to_clear)
# The first entry whose ops_set contains the op's type (None matches anything)
# and whose incompatible_pack_flags do not intersect the pass's current flags
# accepts the op into the pass and updates the flags.  Entry order matters:
# post-ops are matched before main ops so they can fuse, and the final
# catch-all (ops_set=None) must stay last as the CPU fallback.
test_sequence = [
    (
        # ops_set
        npu_post_ops,
        # incompatible_pack_flags
        PassFlags.Cpu | PassFlags.MemoryOnly | PassFlags.Pre | PassFlags.Main,
        # flags_to_set
        PassFlags.Npu | PassFlags.Post,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        npu_post_fuse_limited_ops,
        # incompatible_pack_flags
        PassFlags.Cpu | PassFlags.MemoryOnly | PassFlags.Pre | PassFlags.Main,
        # flags_to_set
        PassFlags.Npu | PassFlags.PostFusingLimited,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        mac_main_ops,
        # incompatible_pack_flags
        PassFlags.Cpu
        | PassFlags.MemoryOnly
        | PassFlags.ElementWise
        | PassFlags.Pre
        | PassFlags.Main
        | PassFlags.PostFusingLimited,
        # flags_to_set
        PassFlags.Npu | PassFlags.Mac | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        elem_wise_main_ops,
        # incompatible_pack_flags
        PassFlags.Cpu
        | PassFlags.MemoryOnly
        | PassFlags.Mac
        | PassFlags.Pre
        | PassFlags.Main
        | PassFlags.PostFusingLimited,
        # flags_to_set
        PassFlags.Npu | PassFlags.ElementWise | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        npu_pre_ops,
        # incompatible_pack_flags
        PassFlags.Cpu | PassFlags.MemoryOnly,
        # flags_to_set
        PassFlags.Npu | PassFlags.Mac | PassFlags.Pre | PassFlags.ElementWise,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        npu_dma_ops,
        # incompatible_pack_flags
        PassFlags.Cpu | PassFlags.MemoryOnly,
        # flags_to_set
        PassFlags.Npu | PassFlags.Dma,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        startup_init_ops,
        # incompatible_pack_flags
        PassFlags.Npu | PassFlags.Cpu | PassFlags.MemoryOnly,
        # flags_to_set
        PassFlags.StartupInit | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        memory_only_ops,
        # incompatible_pack_flags
        PassFlags.Npu | PassFlags.Cpu,
        # flags_to_set
        PassFlags.MemoryOnly | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (
        # ops_set
        cpu_ops,
        # incompatible_pack_flags
        PassFlags.Npu | PassFlags.MemoryOnly | PassFlags.Main,
        # flags_to_set
        PassFlags.Cpu | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
    (  # This last one is a fallback for unrecognised operations
        # ops_set
        None,
        # incompatible_pack_flags
        PassFlags.Npu | PassFlags.MemoryOnly | PassFlags.Main,
        # flags_to_set
        PassFlags.Cpu | PassFlags.Main,
        # flags_to_clear
        PassFlags.Empty,
    ),
]

# Some sanity checking: an entry must never set and clear the same flag.
for (operation_set, incompatible_pack_flags, flags_to_set, flags_to_clear) in test_sequence:
    assert not flags_to_clear & flags_to_set
214
Tim Hall79d07d22020-04-27 18:20:16 +0100215
def pack_into_passes(nng, arch, verbose_packing=False):
    """Pack each subgraph's operations into Passes and return the graph.

    Traverses every subgraph in reverse, from its output tensors towards its
    inputs, greedily grouping compatible operations (per test_sequence) into
    Pass objects.  Sets sg.passes (in execution order) and links each op to
    its pass via op.scheduled_pass.

    NOTE(review): the arch parameter is not referenced in this function body.
    """

    def visit_op(op, ignored):
        # Count one visit per consumed output; only when every output of the
        # op has been visited is the op ready to be packed into a pass.
        visit_op_refcount[op] += 1

        if visit_op_refcount[op] == 1:  # First-time visit, go and fix up unused output tensors
            for tens in op.outputs:
                if len(tens.consumers()) == 0:
                    # Dangling output: pre-count it so the op still becomes ready
                    visit_op_refcount[op] += 1

        assert visit_op_refcount[op] <= len(op.outputs)
        if visit_op_refcount[op] == len(op.outputs):

            if op.type in startup_init_ops:
                # Constants/placeholders are collected into one startup pass later
                startup_list.append(op)
            else:
                ofm_tensor = op.ofm
                if ofm_tensor is None:
                    ofm_tensor = op.outputs[0]
                # Clone so the pass owns its shape independently of the op
                ofm_shape = op.ofm_shapes[0].clone() if op.run_on_npu else None

                build_pass((op,), ofm_tensor, ofm_shape)

    def build_pass(start_ops_to_process, ofm_tensor=None, ofm_shape=None):
        """Grow a pass backwards from start_ops_to_process and register it.

        Returns the created Pass.  Ops and intermediates are accumulated in
        reverse (output-to-input) order and flipped at the end.
        """
        reverse_ops_list = []
        curr_flags = PassFlags.Empty
        npu_block_type = NpuBlockType.Default

        reverse_intermediates = []
        input_set = set()
        ifm_tensor = None
        primary_op = None
        ifm_shapes = None

        to_process = collections.deque()
        for start_op in start_ops_to_process:
            to_process.append((start_op, None))

        while to_process:
            curr_op, tens = to_process.popleft()

            if curr_op in reverse_ops_list:
                continue

            # First matching rule in test_sequence accepts the op into the pass
            for operation_set, incompatible_pack_flags, flags_to_set, flags_to_clear in test_sequence:
                if operation_set is None or curr_op.type in operation_set:
                    if not (curr_flags & incompatible_pack_flags):
                        if flags_to_set & PassFlags.Npu:
                            # An NPU rule cannot take an op forced onto the CPU;
                            # keep looking for a later (CPU/fallback) rule
                            if not curr_op.run_on_npu:
                                continue

                        reverse_ops_list.append(curr_op)
                        new_block_type = curr_op.type.npu_block_type
                        if new_block_type != NpuBlockType.Default:
                            assert npu_block_type == NpuBlockType.Default
                            npu_block_type = new_block_type  # Only one major block type per pass
                            assert primary_op is None
                            primary_op = curr_op

                        curr_flags &= ~flags_to_clear
                        curr_flags |= flags_to_set

                        if flags_to_set & PassFlags.Npu:
                            if flags_to_set & (
                                PassFlags.Mac | PassFlags.ElementWise | PassFlags.Post | PassFlags.PostFusingLimited
                            ):
                                assert len(curr_op.inputs) >= 1
                                ifm_tensor = curr_op.ifm
                                ifm_shapes = curr_op.ifm_shapes.copy()
                                assert ifm_tensor is not None, "IFM missing in {}".format(curr_op)
                                assert ifm_tensor.purpose == TensorPurpose.FeatureMap

                        if flags_to_set & PassFlags.Dma:
                            # DMAs are special - Output buffers need to be preserved as intermediates,
                            # if the pass consumes the results
                            if tens is not None:
                                reverse_intermediates.append(tens)

                        if operation_set is None:
                            print("Warning:", curr_op.type, "operation is unknown or unsupported, placing on CPU")

                        # Try to pull each producer of curr_op's inputs into
                        # this pass; a producer may only be packed if all of
                        # its outputs are consumed solely by curr_op
                        for inp in reversed(curr_op.inputs):
                            if inp is None:
                                continue
                            can_pack = True
                            if len(inp.ops) == 1:
                                next_op = inp.ops[0]
                                for outp in next_op.outputs:
                                    consumers = outp.consumers()
                                    if len(consumers) > 1 or (len(consumers) == 1 and consumers[0] != curr_op):
                                        can_pack = False
                                        break
                            else:
                                can_pack = False

                            if can_pack:
                                to_process.append((next_op, inp))
                            else:
                                assert inp is not None
                                input_set.add(inp)

                        break

            else:
                # This operation is not compatible with already packed operations, just register the tensor as an input
                assert tens is not None
                input_set.add(tens)

        if curr_flags & PassFlags.Npu and not curr_flags & (PassFlags.ElementWise | PassFlags.Mac):
            # Make the choice that if we don't have a mac operation, the ambidextrous operations go on the
            # element wise unit
            curr_flags |= PassFlags.ElementWise

        is_element_wise = True
        for op in reverse_ops_list:
            if op.type not in elem_wise_ops and op.type not in npu_dma_ops:
                is_element_wise = False
                break

        # Exactly one placement flag must have been accumulated
        placement = PassPlacement.Unknown
        if curr_flags & PassFlags.Npu:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.Npu
        if curr_flags & PassFlags.Cpu:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.Cpu
        if curr_flags & PassFlags.MemoryOnly:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.MemoryOnly
        if curr_flags & PassFlags.StartupInit:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.StartupInit
        assert placement != PassPlacement.Unknown

        ops_list = list(reversed(reverse_ops_list))
        intermediates = list(reversed(reverse_intermediates))

        if primary_op is None:
            # Pass has only pre/post ops: synthesise a 1x1 AvgPool primary op
            primary_op = create_primary_op(ops_list)
            if primary_op is not None:
                visit_tensor_refcount[primary_op.inputs[0]] += 1
                npu_block_type = primary_op.type.npu_block_type
                for input_tens in primary_op.inputs:
                    if input_tens not in input_set:
                        input_set.add(input_tens)

        ordered_input_list = []
        # Keep LUT-s in a separate list and add as inputs at the end
        # to avoid that they would accidentally be assigned as ifm or ifm2
        lut_list = []
        input_refcounts = collections.defaultdict(int)
        input_ops_list = ops_list.copy()

        # Check primary_op first so its inputs come first in the ordered list
        if primary_op is not None:
            for inp in primary_op.inputs:
                if inp is None:
                    continue
                # If the input arrives via an in-pass DMA, record the DMA's
                # source tensor as the pass input instead
                if len(inp.ops) == 1 and inp.ops[0].type == Op.DMA and inp.purpose == TensorPurpose.FeatureMap:
                    src_op = inp.ops[0]
                    if src_op in input_ops_list:
                        inp = src_op.inputs[0]
                        input_ops_list.remove(src_op)
                add_input_list(inp, input_set, input_refcounts, lut_list, ordered_input_list)
            input_ops_list.remove(primary_op)

        # Check rest of the list
        for op in input_ops_list:
            for inp in op.inputs:
                add_input_list(inp, input_set, input_refcounts, lut_list, ordered_input_list)

        # Prefer a non-DMA op's name for the pass
        name = ops_list[0].name
        non_dma_ops = [op for op in ops_list if op.type != Op.DMA]
        if non_dma_ops:
            name = non_dma_ops[0].name
        ps = Pass(name, placement, is_element_wise, npu_block_type)
        ps.ops = ops_list
        ps.primary_op = primary_op
        ps.inputs = ordered_input_list
        ps.intermediates = intermediates
        ps.outputs = list(ops_list[-1].outputs)

        # ElementWise operation, 2 IFMs
        if ps.primary_op and ps.primary_op.type in binary_elem_wise_main_ops:
            ps.ifm_tensor = ps.inputs[0]
            ps.ifm2_tensor = ps.inputs[-1]

            if len(ps.inputs) > 2:
                ps.ifm_tensor = ps.inputs[-2]

            # Get the corresponding ifm_shapes
            for op in input_ops_list + [primary_op]:
                if op.run_on_npu:
                    if ps.ifm_tensor == op.ifm:
                        ps.ifm_shapes.append(op.ifm_shapes[0])
                    elif ps.ifm_tensor == op.ifm2:
                        ps.ifm_shapes.append(op.ifm_shapes[1])
            for op in input_ops_list + [primary_op]:
                if op.run_on_npu:
                    if ps.ifm2_tensor == op.ifm:
                        ps.ifm_shapes.append(op.ifm_shapes[0])
                    elif ps.ifm2_tensor == op.ifm2:
                        ps.ifm_shapes.append(op.ifm_shapes[1])
        else:
            ps.ifm_tensor = ifm_tensor
            ps.ifm2_tensor = None
            if ps.primary_op is not None and ps.primary_op.run_on_npu:
                ps.ifm_shapes.append(ifm_shapes[0])

        ps.ofm_tensor = ofm_tensor
        ps.ofm_shapes.append(ofm_shape)

        assert ps.placement != PassPlacement.Npu or ps.ofm_tensor is not None
        ps.weight_tensor = ps.get_primary_op_ifm_weights()[1]
        ps.scale_tensor = ps.get_primary_op_ifm_weights_biases_ofm()[2]
        ps.lut_tensor = ps.get_primary_op_lut()
        ps.inputs.extend(lut_list)

        for op in ps.ops:
            op.scheduled_pass = ps

        reverse_pass_list.append(ps)

        # Continue the reverse traversal through this pass's inputs
        for inp, refcount in input_refcounts.items():
            for _ in range(refcount):
                visit_tensor(inp)

        return ps

    def visit_tensor(tens):
        # A tensor is fully visited once all of its consumers have been seen;
        # only then are its producer ops visited (keeps traversal in reverse
        # topological order)
        visit_tensor_refcount[tens] += 1
        assert visit_tensor_refcount[tens] <= len(tens.consumers())
        if visit_tensor_refcount[tens] == len(tens.consumers()):
            for op in reversed(tens.ops):
                visit_op(op, tens)

    def create_primary_op(op_list):
        """Insert a 1x1 AvgPool no-op as primary op for a pass holding only pre/post ops."""
        if any(op.type in (npu_pre_ops | npu_post_ops | npu_post_fuse_limited_ops) and op.run_on_npu for op in op_list):
            # Configure a 1x1 AvgPool and attach the op onto it
            op = op_list[0]
            inp = op.inputs[0]
            avgpool_op = create_avgpool_nop(op.name + "_avgpool")
            avgpool_op.add_input_tensor(inp)
            avgpool_out = inp.clone("_avgpooled")
            avgpool_out.consumer_list.append(op)
            avgpool_op.set_output_tensor(avgpool_out)
            avgpool_op.set_ifm_ofm_shapes()

            # Rewire: op now reads the avgpool output, and the avgpool
            # becomes the first op of the pass
            op.inputs[0] = avgpool_out
            op_list.insert(0, avgpool_op)
            op.set_ifm_ofm_shapes()

            DebugDatabase.add_optimised(op, avgpool_op)
            return avgpool_op

        return None

    def add_input_list(inp_to_add, inp_set, inp_refcnts, lut_list, ordered_inp_list):
        # Append inp_to_add on first sight only; LUTs go to lut_list so they
        # are appended after all other inputs (see comment at lut_list)
        if inp_to_add in inp_set:
            if inp_refcnts[inp_to_add] == 0:
                if inp_to_add.purpose == TensorPurpose.LUT:
                    lut_list.append(inp_to_add)
                else:
                    ordered_inp_list.append(inp_to_add)
            inp_refcnts[inp_to_add] += 1

    for sg in nng.subgraphs:
        # Per-subgraph traversal state shared by the closures above
        reverse_pass_list = []
        visit_op_refcount = collections.defaultdict(int)
        visit_tensor_refcount = collections.defaultdict(int)

        startup_list = []

        # Kick off the reverse traversal from the subgraph outputs
        for tens in sg.output_tensors:
            visit_tensor(tens)

        if startup_list:
            startup_ps = build_pass(startup_list)
            startup_ps.outputs = [op.outputs[0] for op in startup_list]  # Need to fixup the outputs
            startup_ps.name = "startup_weight_initialisation"

        # Passes were collected output-first; store them in execution order
        sg.passes = list(reversed(reverse_pass_list))
        sg.build_pass_links()

        if verbose_packing:
            nng.print_passes()

    return nng