blob: 2a1903daaecb9d0389bc5c070ebeca8163101d27 [file] [log] [blame]
Patrik Gustavssone3b1b912021-02-09 15:38:46 +01001# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
Tim Hall79d07d22020-04-27 18:20:16 +01002#
3# SPDX-License-Identifier: Apache-2.0
4#
5# Licensed under the Apache License, Version 2.0 (the License); you may
6# not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an AS IS BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
Tim Hall79d07d22020-04-27 18:20:16 +010016# Description:
17# Packs a subgraph with Neural Network Operations into Passes. Each Pass has one or more Operations.
Diego Russoea6111a2020-04-14 18:41:58 +010018import collections
Diego Russoe8a10452020-04-21 17:39:10 +010019import enum
Diego Russoea6111a2020-04-14 18:41:58 +010020
Tim Halle6ccd872020-11-09 16:46:37 +000021from .debug_database import DebugDatabase
Diego Russoe8a10452020-04-21 17:39:10 +010022from .nn_graph import Pass
23from .nn_graph import PassPlacement
24from .operation import NpuBlockType
Louis Verhaardaee5d752020-09-30 09:01:52 +020025from .operation import Op
Fredrik Svedbergd9c2c422020-12-01 16:33:45 +010026from .operation_util import create_avgpool_nop
Diego Russoea6111a2020-04-14 18:41:58 +010027from .tensor import TensorPurpose
Tim Hall79d07d22020-04-27 18:20:16 +010028
29
class PassFlags(enum.Flag):
    """Bit flags describing what a Pass contains and where it executes."""

    Empty = 0
    Main = enum.auto()  # 1
    Post = enum.auto()  # 2
    Mac = enum.auto()  # 4
    Dma = enum.auto()  # 8
    ElementWise = enum.auto()  # 16
    Npu = enum.auto()  # 32
    Cpu = enum.auto()  # 64
    StartupInit = enum.auto()  # 128
    MemoryOnly = enum.auto()  # 256
    PostFusingLimited = enum.auto()  # 512
Tim Hall79d07d22020-04-27 18:20:16 +010042
43
# Operations executed on the NPU MAC (main) unit.
mac_main_ops = {
    # convolutions
    Op.Conv2DBias,
    Op.Conv2D,
    Op.QuantizedConv2D,
    Op.Conv2DBackpropInputSwitchedBias,
    # depth-wise convolutions
    Op.DepthwiseConv2DBias,
    # FC layers
    Op.QuantizedMatMul,
    Op.MatMul,
    Op.FullyConnected,
    # RNN/LSTM/GRU
    Op.BlockLSTM,
    # pooling
    Op.QuantizedMaxPool,
    Op.QuantizedAvgPool,
    Op.AvgPool,
    Op.MaxPool,
    Op.ReduceSum,
    # deconvolution
    Op.ResizeBilinear,
}

binary_elem_wise_main_ops = Op.op_set(Op.is_binary_elementwise_op)

unary_elem_wise_main_ops = Op.op_set(Op.is_unary_elementwise_op)

elem_wise_main_ops = binary_elem_wise_main_ops | unary_elem_wise_main_ops

activation_ops = Op.op_set(Op.is_relu_op)
npu_post_ops = activation_ops

# Post operators that should not be fused with main/elementwise ops
npu_post_fuse_limited_ops = {Op.Sigmoid, Op.Tanh, Op.Quantize}

elem_wise_ops = elem_wise_main_ops | activation_ops | {Op.Sigmoid, Op.Tanh}


quantization_ops = {Op.Dequantize, Op.Max, Op.Min}
cpu_ops = {Op.Softmax, Op.LRN, Op.Shape, Op.Pad, Op.AddN} | quantization_ops

npu_dma_ops = {Op.DMA}
startup_init_ops = {Op.Const, Op.Placeholder, Op.SubgraphInput}
memory_only_ops = {Op.Squeeze, Op.Reshape, Op.QuantizedReshape, Op.ExpandDims}
Tim Hall79d07d22020-04-27 18:20:16 +010093
94
# Packing rules, tried in order for every operation encountered while building
# a pass. Each entry is a 4-tuple:
#     (ops_set, incompatible_pack_flags, flags_to_set, flags_to_clear)
# An op whose type is in ops_set may join the current pass, unless the pass
# already carries any of the incompatible flags. ops_set None is the final
# catch-all that places unrecognised operations on the CPU.
test_sequence = [
    (
        npu_post_ops,
        PassFlags.Cpu | PassFlags.MemoryOnly | PassFlags.Main,
        PassFlags.Npu | PassFlags.Post,
        PassFlags.Empty,
    ),
    (
        npu_post_fuse_limited_ops,
        PassFlags.Cpu | PassFlags.MemoryOnly | PassFlags.Main,
        PassFlags.Npu | PassFlags.PostFusingLimited,
        PassFlags.Empty,
    ),
    (
        mac_main_ops,
        PassFlags.Cpu | PassFlags.MemoryOnly | PassFlags.ElementWise | PassFlags.Main | PassFlags.PostFusingLimited,
        PassFlags.Npu | PassFlags.Mac | PassFlags.Main,
        PassFlags.Empty,
    ),
    (
        elem_wise_main_ops,
        PassFlags.Cpu | PassFlags.MemoryOnly | PassFlags.Mac | PassFlags.Main | PassFlags.PostFusingLimited,
        PassFlags.Npu | PassFlags.ElementWise | PassFlags.Main,
        PassFlags.Empty,
    ),
    (
        npu_dma_ops,
        PassFlags.Cpu | PassFlags.MemoryOnly,
        PassFlags.Npu | PassFlags.Dma,
        PassFlags.Empty,
    ),
    (
        startup_init_ops,
        PassFlags.Npu | PassFlags.Cpu | PassFlags.MemoryOnly,
        PassFlags.StartupInit | PassFlags.Main,
        PassFlags.Empty,
    ),
    (
        memory_only_ops,
        PassFlags.Npu | PassFlags.Cpu,
        PassFlags.MemoryOnly | PassFlags.Main,
        PassFlags.Empty,
    ),
    (
        cpu_ops,
        PassFlags.Npu | PassFlags.MemoryOnly | PassFlags.Main,
        PassFlags.Cpu | PassFlags.Main,
        PassFlags.Empty,
    ),
    (  # This last one is a fallback for unrecognised operations
        None,
        PassFlags.Npu | PassFlags.MemoryOnly | PassFlags.Main,
        PassFlags.Cpu | PassFlags.Main,
        PassFlags.Empty,
    ),
]

# Some sanity checking: a rule must never set and clear the same flag.
for _ops_set, _incompatible, rule_flags_to_set, rule_flags_to_clear in test_sequence:
    assert not rule_flags_to_clear & rule_flags_to_set
191
Tim Hall79d07d22020-04-27 18:20:16 +0100192
def pack_into_passes(nng, arch, verbose_packing: bool = False):
    """Pack the operations of every subgraph in nng into Passes.

    Each subgraph is traversed backwards from its output tensors; compatible
    producer operations are greedily grouped into a Pass according to the
    packing rules in test_sequence. The resulting passes are stored on
    sg.passes in forward (execution) order.

    Args:
        nng: the neural network graph whose subgraphs are packed.
        arch: architecture descriptor; note it is not used in this function.
        verbose_packing: when True, print the resulting passes at the end.

    Returns:
        nng, with passes attached to each subgraph.
    """

    def visit_op(op, ignored):
        # One visit is counted per consumed output; when all outputs have been
        # accounted for, the op is ready to be packed into a pass.
        visit_op_refcount[op] += 1

        if visit_op_refcount[op] == 1:  # First-time visit, go and fix up unused output tensors
            for tens in op.outputs:
                if len(tens.consumers()) == 0:
                    # Outputs with no consumers would never be visited through
                    # visit_tensor, so count them as visited right away.
                    visit_op_refcount[op] += 1

        assert visit_op_refcount[op] <= len(op.outputs)
        if visit_op_refcount[op] == len(op.outputs):

            if op.type in startup_init_ops:
                # Const/Placeholder/SubgraphInput ops are collected and built
                # into a single startup pass after the main traversal.
                startup_list.append(op)
            else:
                ofm_tensor = op.ofm
                if ofm_tensor is None:
                    ofm_tensor = op.outputs[0]
                # ofm_shapes is only populated for NPU ops; CPU ops get None.
                ofm_shape = op.ofm_shapes[0] if op.run_on_npu else None

                build_pass((op,), ofm_tensor, ofm_shape)

    def build_pass(start_ops_to_process, ofm_tensor=None, ofm_shape=None):
        """Build one Pass, working backwards from start_ops_to_process.

        Packs as many producer ops as the rules in test_sequence allow into
        the same pass, then recursively visits the pass's input tensors so
        that upstream passes get built. Returns the created Pass.
        """
        reverse_ops_list = []
        curr_flags = PassFlags.Empty
        npu_block_type = NpuBlockType.Default

        reverse_intermediates = []
        input_set = set()
        ifm_tensor = None
        primary_op = None
        ifm_shapes = None

        # Worklist of (op, tensor-it-was-reached-through) pairs; the tensor is
        # None for the start ops.
        to_process = collections.deque()
        for start_op in start_ops_to_process:
            to_process.append((start_op, None))

        while to_process:
            curr_op, tens = to_process.popleft()

            if curr_op in reverse_ops_list:
                continue

            # Find the first packing rule that accepts this op given the flags
            # accumulated so far.
            for operation_set, incompatible_pack_flags, flags_to_set, flags_to_clear in test_sequence:
                if operation_set is None or curr_op.type in operation_set:
                    if not (curr_flags & incompatible_pack_flags):
                        if flags_to_set & PassFlags.Npu:
                            if not curr_op.run_on_npu:
                                # NPU rule but op must run on CPU: try the
                                # next (CPU/fallback) rule instead.
                                continue

                        reverse_ops_list.append(curr_op)
                        new_block_type = curr_op.type.npu_block_type
                        if new_block_type != NpuBlockType.Default:
                            assert npu_block_type == NpuBlockType.Default
                            npu_block_type = new_block_type  # Only one major block type per pass
                            assert primary_op is None
                            primary_op = curr_op

                        curr_flags &= ~flags_to_clear
                        curr_flags |= flags_to_set

                        if flags_to_set & PassFlags.Npu:
                            if flags_to_set & (
                                PassFlags.Mac | PassFlags.ElementWise | PassFlags.Post | PassFlags.PostFusingLimited
                            ):
                                assert len(curr_op.inputs) >= 1
                                ifm_tensor = curr_op.ifm
                                ifm_shapes = curr_op.ifm_shapes.copy()
                                assert ifm_tensor is not None, "IFM missing in {}".format(curr_op)
                                assert ifm_tensor.purpose == TensorPurpose.FeatureMap

                        if flags_to_set & PassFlags.Dma:
                            # DMAs are special - Output buffers need to be preserved as intermediates,
                            # if the pass consumes the results
                            if tens is not None:
                                reverse_intermediates.append(tens)

                        if operation_set is None:
                            print("Warning:", curr_op.type, "operation is unknown or unsupported, placing on CPU")

                        # Walk the inputs backwards: pack the producer into
                        # this pass where allowed, otherwise record the tensor
                        # as a pass input.
                        for inp in reversed(curr_op.inputs):
                            if inp is None:
                                continue
                            if can_pack(inp, curr_op):
                                to_process.append((inp.ops[0], inp))
                            else:
                                input_set.add(inp)

                        break

            else:
                # This operation is not compatible with already packed operations, just register the tensor as an input
                assert tens is not None
                input_set.add(tens)

        if curr_flags & PassFlags.Npu and not curr_flags & (PassFlags.ElementWise | PassFlags.Mac):
            # Make the choice that if we don't have a mac operation, the ambidextrous operations go on the
            # element wise unit
            curr_flags |= PassFlags.ElementWise

        # A pass is element-wise only if every op in it is element-wise or DMA.
        is_element_wise = True
        for op in reverse_ops_list:
            if op.type not in elem_wise_ops and op.type not in npu_dma_ops:
                is_element_wise = False
                break

        # Exactly one placement flag must have been set for the whole pass.
        placement = PassPlacement.Unknown
        if curr_flags & PassFlags.Npu:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.Npu
        if curr_flags & PassFlags.Cpu:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.Cpu
        if curr_flags & PassFlags.MemoryOnly:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.MemoryOnly
        if curr_flags & PassFlags.StartupInit:
            assert placement == PassPlacement.Unknown
            placement = PassPlacement.StartupInit
        assert placement != PassPlacement.Unknown

        # Ops were collected backwards; restore execution order.
        ops_list = list(reversed(reverse_ops_list))
        intermediates = list(reversed(reverse_intermediates))

        if primary_op is None:
            # No main-block op in the pass: synthesise a 1x1 AvgPool no-op to
            # act as the primary op (see create_primary_op).
            primary_op = create_primary_op(ops_list)
            if primary_op is not None:
                visit_tensor_refcount[primary_op.inputs[0]] += 1
                npu_block_type = primary_op.type.npu_block_type
                for input_tens in primary_op.inputs:
                    if input_tens not in input_set:
                        input_set.add(input_tens)

        ordered_input_list = []
        # Keep LUT-s in a separate list and add as inputs at the end
        # to avoid that they would accidentally be assigned as ifm or ifm2
        lut_list = []
        input_refcounts = collections.defaultdict(int)
        input_ops_list = ops_list.copy()

        # Check primary_op first
        if primary_op is not None:
            for inp in primary_op.inputs:
                if inp is None:
                    continue
                # If the input is produced by an in-pass DMA of a feature map,
                # record the DMA's source tensor as the pass input instead.
                if len(inp.ops) == 1 and inp.ops[0].type == Op.DMA and inp.purpose == TensorPurpose.FeatureMap:
                    src_op = inp.ops[0]
                    if src_op in input_ops_list:
                        inp = src_op.inputs[0]
                        input_ops_list.remove(src_op)
                add_input_list(inp, input_set, input_refcounts, lut_list, ordered_input_list)
            input_ops_list.remove(primary_op)

        # Check rest of the list
        for op in input_ops_list:
            for inp in op.inputs:
                add_input_list(inp, input_set, input_refcounts, lut_list, ordered_input_list)

        # Name the pass after the first non-DMA op when there is one.
        name = ops_list[0].name
        non_dma_ops = [op for op in ops_list if op.type != Op.DMA]
        if non_dma_ops:
            name = non_dma_ops[0].name
        ps = Pass(name, placement, is_element_wise, npu_block_type)
        ps.ops = ops_list
        ps.primary_op = primary_op
        ps.inputs = ordered_input_list
        ps.intermediates = intermediates
        ps.outputs = list(ops_list[-1].outputs)

        # ElementWise operation, 2 IFMs
        if ps.primary_op and ps.primary_op.type in binary_elem_wise_main_ops:
            ps.ifm_tensor = ps.inputs[0]
            ps.ifm2_tensor = ps.inputs[-1]

            if len(ps.inputs) > 2:
                ps.ifm_tensor = ps.inputs[-2]

            # Get the corresponding ifm_shapes
            for op in input_ops_list + [primary_op]:
                if op.run_on_npu:
                    if ps.ifm_tensor == op.ifm:
                        ps.ifm_shapes.append(op.ifm_shapes[0])
                    elif ps.ifm_tensor == op.ifm2:
                        ps.ifm_shapes.append(op.ifm_shapes[1])
            for op in input_ops_list + [primary_op]:
                if op.run_on_npu:
                    if ps.ifm2_tensor == op.ifm:
                        ps.ifm_shapes.append(op.ifm_shapes[0])
                    elif ps.ifm2_tensor == op.ifm2:
                        ps.ifm_shapes.append(op.ifm_shapes[1])
        else:
            ps.ifm_tensor = ifm_tensor
            ps.ifm2_tensor = None
            if ps.primary_op is not None and ps.primary_op.run_on_npu:
                ps.ifm_shapes.append(ifm_shapes[0])

        ps.ofm_tensor = ofm_tensor
        ps.ofm_shapes.append(ofm_shape)

        assert ps.placement != PassPlacement.Npu or ps.ofm_tensor is not None
        ps.weight_tensor = ps.get_primary_op_ifm_weights()[1]
        ps.scale_tensor = ps.get_primary_op_ifm_weights_biases_ofm()[2]
        ps.lut_tensor = ps.get_primary_op_lut()
        ps.inputs.extend(lut_list)

        for op in ps.ops:
            op.scheduled_pass = ps

        reverse_pass_list.append(ps)

        # Recurse into the producers of this pass's inputs, once per consumer
        # reference so that visit_tensor's refcounting stays balanced.
        for inp, refcount in input_refcounts.items():
            for _ in range(refcount):
                visit_tensor(inp)

        return ps

    def visit_tensor(tens):
        # Visit the tensor's producers only once all of its consumers have
        # been processed.
        visit_tensor_refcount[tens] += 1
        assert visit_tensor_refcount[tens] <= len(tens.consumers())
        if visit_tensor_refcount[tens] == len(tens.consumers()):
            for op in reversed(tens.ops):
                visit_op(op, tens)

    def create_primary_op(op_list):
        """Create (and prepend) a 1x1 AvgPool no-op to act as primary op.

        Only done when the pass contains an NPU post op but no main op;
        returns the new AvgPool op, or None if no primary op is needed.
        """
        if any(op.type in (npu_post_ops | npu_post_fuse_limited_ops) and op.run_on_npu for op in op_list):
            # Configure a 1x1 AvgPool and attach the op onto it
            op = op_list[0]
            inp = op.inputs[0]
            avgpool_op = create_avgpool_nop(op.name + "_avgpool")
            avgpool_op.add_input_tensor(inp)
            avgpool_out = inp.clone("_avgpooled")
            avgpool_out.consumer_list.append(op)
            avgpool_op.set_output_tensor(avgpool_out)
            avgpool_op.ifm_shapes = op.ifm_shapes.copy()
            avgpool_op.ofm_shapes = op.ofm_shapes.copy()
            avgpool_op.read_offsets = op.read_offsets.copy()

            # Re-route the original op to consume the AvgPool's output.
            op.inputs[0] = avgpool_out
            op_list.insert(0, avgpool_op)

            DebugDatabase.add_optimised(op, avgpool_op)
            return avgpool_op

        return None

    def can_pack(inp, curr_op):
        """Return True if inp's (sole) producer may be packed with curr_op."""
        if len(inp.ops) == 1:
            next_op = inp.ops[0]
            # The producer must feed curr_op exclusively, through all outputs.
            for outp in next_op.outputs:
                consumers = outp.consumers()
                if len(consumers) > 1 or (len(consumers) == 1 and consumers[0] != curr_op):
                    return False

            # There cannot be any reshaping between next_op ofm and corresponding curr_op ifm
            if len(curr_op.ifm_shapes) != 0 and len(next_op.ofm_shapes) != 0:
                if inp == curr_op.ifm and next_op.ofm_shapes[0] != curr_op.ifm_shapes[0]:
                    return False
                elif (
                    curr_op.ifm2 is not None and inp == curr_op.ifm2 and next_op.ofm_shapes[0] != curr_op.ifm_shapes[1]
                ):
                    return False
        else:
            # Tensors with zero or multiple producers are never packed.
            return False

        return True

    def add_input_list(inp_to_add, inp_set, inp_refcnts, lut_list, ordered_inp_list):
        # Append the tensor (first time only) to the ordered input list, or to
        # the LUT list for LUT tensors, while tracking how many times it is
        # referenced by ops in the pass.
        if inp_to_add in inp_set:
            if inp_refcnts[inp_to_add] == 0:
                if inp_to_add.purpose == TensorPurpose.LUT:
                    lut_list.append(inp_to_add)
                else:
                    ordered_inp_list.append(inp_to_add)
            inp_refcnts[inp_to_add] += 1

    for sg in nng.subgraphs:
        # Per-subgraph state shared by the nested helpers above.
        reverse_pass_list = []
        visit_op_refcount = collections.defaultdict(int)
        visit_tensor_refcount = collections.defaultdict(int)

        startup_list = []

        # Drive the backwards traversal from the subgraph outputs.
        for tens in sg.output_tensors:
            visit_tensor(tens)

        if startup_list:
            startup_ps = build_pass(startup_list)
            startup_ps.outputs = [op.outputs[0] for op in startup_list]  # Need to fixup the outputs
            startup_ps.name = "startup_weight_initialisation"

        # Passes were appended in reverse; store them in execution order.
        sg.passes = list(reversed(reverse_pass_list))
        sg.build_pass_links()

    if verbose_packing:
        nng.print_passes()

    return nng
489 return nng