# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Early optimisation of the network graph, using the rewrite_graph module to do the traversal of the graph. These are
# split into two parts, optimise_graph_a and optimise_graph_b.
import math

import numpy as np

from . import fp_math
from . import lut
from . import rewrite_graph
from . import scaling
from .data_type import DataType
from .errors import UnsupportedFeatureError
from .ethos_u55_regs.ethos_u55_regs import resampling_mode
from .numeric_util import clamp_sigmoid
from .numeric_util import full_shape
from .numeric_util import round_away_zero
from .operation import create_avgpool_nop
from .operation import NpuBlockType
from .operation import Op
from .operation import Operation
from .softmax import SoftMax
from .tensor import check_quantized_tens_scaling_equal
from .tensor import create_const_tensor
from .tensor import create_reshape_tensor
from .tensor import QuantizationParameters
from .tensor import Tensor

passthrough_nodes = set((Op.Identity,))

memory_only_ops = set((Op.Reshape,))


def remove_passthrough_tensor(tens, arch, nng):
    if len(tens.ops) == 1 and tens.ops[0].type in passthrough_nodes:
        assert len(tens.ops[0].inputs) == 1
        tens = tens.ops[0].inputs[0]
    return tens


def rewrite_concat(tens, arch, nng):
    if len(tens.ops) == 1 and tens.ops[0].type.is_concat_op():
        concat_op = tens.ops[0]
        if tens != concat_op.outputs[0]:
            return tens  # don't attempt to rewrite the min/max outputs of QuantizedConcat

        # Not supported so leave it and run on CPU
        if not concat_op.run_on_npu:
            return tens

        inputs, axis = concat_op.get_concat_inputs_axis()

        tens.ops = []
        offset = 0
        for idx, inp in enumerate(inputs):
            new_op = Operation(Op.ConcatSliceWrite, concat_op.name + str(idx))
            new_op.inputs = [inp]
            new_op.outputs = [tens]
            new_op.attrs["concat_axis"] = axis
            new_op.attrs["concat_start"] = offset
            offset += inp.shape[axis]
            new_op.attrs["concat_end"] = offset
            new_op.run_on_npu = True
            tens.ops.append(new_op)
        assert tens.shape[axis] == offset

        # If axis corresponds to the C-dimension, NHCWB16 can only be used in the output if all the concat_start's are
        # a multiple of 16. This is because only then is the address offset for the OFM, for all operations, 16 byte
        # aligned. For other values of axis the address offsets will be 16 byte aligned, as they are all based on c = 0
        # and those addresses are always 16 byte aligned due to the NHCWB16 format.
        if axis == -1 or axis == (len(tens.shape) - 1):
            for op in tens.ops:
                if op.attrs["concat_start"] % 16 != 0:
                    tens.avoid_NHCWB16 = True
                    break

    return tens
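
# Illustrative note (example values assumed, not taken from a real network): concatenating
# three [1, 8, 8, 24] tensors along the channel axis yields three ConcatSliceWrite ops with
# (concat_start, concat_end) of (0, 24), (24, 48) and (48, 72); since 24 and 48 are not
# multiples of 16, the check above marks the output tensor with avoid_NHCWB16.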


def rewrite_split(tens, arch, nng):

    if len(tens.ops) == 1 and tens.ops[0].type.is_split_op():
        split_op = tens.ops[0]

        # Not supported so leave it and run on CPU
        if not split_op.run_on_npu:
            return tens

        inp, outputs, axis, offset_start, offset_end = split_op.get_split_inputs_axis()

        tens.ops = []
        new_op = Operation(Op.SplitSliceRead, split_op.name)
        new_op.inputs = [inp]

        # For Split the offset cannot be extracted from the tensor so it has to
        # be calculated from the index of the output tensor
        if axis is not None:
            # Get the start and end of the split
            offset_start = [0] * len(tens.shape)
            offset_end = [0] * len(tens.shape)
            for out in outputs:
                if out == tens:
                    break
                offset_start[axis] += out.shape[axis]

            # If the start offset is not a multiple of 16 in the C-dimension, NHCWB16 needs to be avoided in the input
            if (offset_start[-1] % 16) != 0:
                inp.avoid_NHCWB16 = True

            offset_end[axis] = offset_start[axis] + tens.shape[axis]

        new_op.attrs["split_start"] = offset_start
        new_op.attrs["split_end"] = offset_end
        new_op.run_on_npu = True
        new_op.set_output_tensor(tens)

    return tens
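
# Illustrative note (example values assumed): splitting a [1, 8, 8, 24] tensor into three
# equal parts along the channel axis gives the outputs offset_start values of 0, 8 and 16
# in the C dimension; 8 is not a multiple of 16, so the check above marks the input tensor
# with avoid_NHCWB16.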


def needed_total_padding(input_size, stride, filter_size):
    out_size = (input_size + stride - 1) // stride
    needed_input = (out_size - 1) * stride + filter_size
    total_padding = max(0, needed_input - input_size)
    return total_padding


def calc_padding_and_skirt(padding_type, kernel_size, stride, input_dims):
    ypad = needed_total_padding(int(input_dims[1]), int(stride[1]), int(kernel_size[0]))
    xpad = needed_total_padding(int(input_dims[2]), int(stride[2]), int(kernel_size[1]))
    if padding_type == b"SAME":
        left_pad = (xpad + 0) // 2
        right_pad = (xpad + 1) // 2
        top_pad = (ypad + 0) // 2
        bottom_pad = (ypad + 1) // 2
    elif padding_type == b"VALID":
        left_pad = 0
        right_pad = 0
        top_pad = 0
        bottom_pad = 0
    else:
        raise UnsupportedFeatureError("Unknown padding {}".format(str(padding_type)))
    padding = (top_pad, left_pad, bottom_pad, right_pad)
    skirt = (top_pad, left_pad, ypad - top_pad, xpad - left_pad)
    return padding, skirt
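
# Worked example (values assumed for illustration): for input_size=224, stride=2 and
# filter_size=3, out_size = ceil(224 / 2) = 112 and needed_input = 111 * 2 + 3 = 225, so
# the total padding in that dimension is 1.  With SAME padding the odd pad unit goes to
# the bottom/right, giving padding = (0, 0, 1, 1) and skirt = (0, 0, 1, 1) when both
# dimensions behave the same way.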


def calc_upscaled_padding_and_skirt(padding_type, kernel_size, stride, input_dims, upscaling_factor):
    kernel_height, kernel_width = kernel_size[0], kernel_size[1]
    if padding_type == b"SAME":
        ypad = needed_total_padding(int(input_dims[1]) * upscaling_factor, int(stride[1]), int(kernel_height))
        xpad = needed_total_padding(int(input_dims[2]) * upscaling_factor, int(stride[2]), int(kernel_width))

        right_pad = max(((xpad + 1) // upscaling_factor) - 1, 0)
        bottom_pad = max(((ypad + 1) // upscaling_factor) - 1, 0)
        left_pad = max(kernel_width - 1 - right_pad, 0)
        top_pad = max(kernel_height - 1 - bottom_pad, 0)

    elif padding_type == b"VALID":
        right_pad = max(kernel_width - 2, 0)
        bottom_pad = max(kernel_height - 2, 0)
        left_pad = kernel_width - 1
        top_pad = kernel_height - 1
    else:
        assert 0, "Unknown padding"

    padding = (top_pad, left_pad, bottom_pad, right_pad)
    skirt = padding
    return padding, skirt
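
# Worked example (values assumed; the strides are already forced to 1 for transpose
# convolutions by fixup_conv2d_backprop below): with SAME padding, a 2x2 kernel, an 8x8
# input and upscaling_factor=2, ypad = xpad = needed_total_padding(16, 1, 2) = 1, so
# right_pad = bottom_pad = 0 and left_pad = top_pad = 1, giving padding = skirt = (1, 1, 0, 0).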


def fixup_conv2d_backprop(op, arch, nng):
    if op.type == Op.Conv2DBackpropInput:
        # flip the inputs
        op.inputs[0], op.inputs[2] = op.inputs[2], op.inputs[0]
        op.type = Op.Conv2DBackpropInputSwitchedBias

        # Update strides
        op.attrs.update({"stride_w": 1, "stride_h": 1, "strides": (1, 1, 1, 1)})

    return op


# Convert the op to an elementwise add
def convert_resizebilinear_1x1_to_add(op):
    op.type = Op.Add
    op.name = op.name + "_add"
    op.attrs["resizebilinear"] = True
    # Create an input tensor filled with zeros
    shape = op.outputs[0].shape
    tens = Tensor(shape, op.inputs[0].dtype, op.inputs[1].name + "_add")
    tens.values = np.zeros(shape)
    tens.quant_values = np.zeros(shape, np.uint8)
    tens.quantization = QuantizationParameters(0.0, 255.0)
    tens.quantization.scale_f32 = 1.0
    tens.quantization.zero_point = 0
    tens.consumer_list = [op]
    tens_op = op.inputs[1].ops[0]
    tens_op.set_output_tensor(tens)
    # Set the add inputs
    op.inputs[1] = op.inputs[0]
    op.inputs[0] = tens

    return op


# Convert ResizeBilinear to a number of 2x2 pool ops
def convert_resizebilinear_to_2x2_pool(op):
    count = 0
    pre_op = op
    outputs = op.outputs

    op.attrs.update({"strides": (1, 1, 1, 1), "ksize": (1, 2, 2, 1)})
    if op.attrs["align_corners"]:
        shape_modifier = 1
        op.attrs["padding"] = b"VALID"
    else:
        shape_modifier = 0
        op.attrs["padding"] = b"SAME"
    op.inputs[0].resampling_mode = resampling_mode.NEAREST

    upscaled_shape = np.array(op.inputs[0].shape[1:3])
    out_shape = np.array(op.outputs[0].shape[1:3])
    if (upscaled_shape == upscaled_shape * 2 - shape_modifier).all():
        return op

    while (upscaled_shape < out_shape).all():
        if count == 0:
            scaled_op = pre_op
        else:
            scaled_op = op.clone("_{}".format(count))
            scaled_op.inputs[0] = pre_op.outputs[0]

        upscaled_shape = upscaled_shape * 2 - shape_modifier

        if (upscaled_shape == out_shape).all():
            scaled_op.outputs = outputs
            scaled_op.outputs[0].ops = [scaled_op]
        else:
            shape = outputs[0].shape.copy()
            shape[1:3] = upscaled_shape[0:2]
            out_tens = Tensor(shape, DataType.int16, "{}_{}".format(op.outputs[0].name, count))
            out_tens.quantization = op.outputs[0].quantization.clone()
            out_tens.quantization.quant_min = np.iinfo(np.int16).min
            out_tens.quantization.quant_max = np.iinfo(np.int16).max
            scaled_op.set_output_tensor(out_tens)
        pre_op = scaled_op
        count += 1

    # Setup the scale value
    if scaled_op.inputs[0].dtype.bits == 8 and scaled_op.outputs[0].dtype.bits == 16:
        scaled_op.attrs["rescale"] = 128
    elif scaled_op.inputs[0].dtype.bits == 16 and scaled_op.outputs[0].dtype.bits == 8:
        scaled_op.attrs["rescale"] = 1 / 128
    elif "rescale" in scaled_op.attrs:
        del scaled_op.attrs["rescale"]

    return op
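
# Illustrative note (example shapes assumed): upscaling a [1, 16, 16, C] input to a
# [1, 64, 64, C] output with align_corners=False chains two of these 2x2 pooling ops
# (16 -> 32 -> 64); the intermediate tensor is created as int16 and the last op in the
# chain writes to the original output tensor.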


def fixup_resizebilinear(op, arch, nng):
    if op.type == Op.ResizeBilinear and op.run_on_npu:
        if op.inputs[0].shape == op.outputs[0].shape:
            # Bypass nop resizebilinear
            op.inputs = op.inputs[:1]
            op.type = Op.Identity
        elif op.inputs[0].shape[1] == 1 and op.inputs[0].shape[2] == 1:
            convert_resizebilinear_1x1_to_add(op)
        else:
            convert_resizebilinear_to_2x2_pool(op)

    return op


def convert_nop_split_to_identity(op, arch, nng):
    if op.type == Op.Split and op.attrs.get("num_splits") == 1:
        # the list comprehension should return a list with a single tensor
        # if it shouldn't, remove_passthrough_tensor will fail appropriately
        op.inputs = [i for i in op.inputs if i.shape == op.outputs[0].shape]
        op.type = Op.Identity
    return op


def fixup_fully_connected_input(op, arch, nng):
    if op.type == Op.FullyConnected:
        inp = op.inputs[0]
        weights = op.inputs[1]

        n_in_elems = weights.shape[-2]
        elms = inp.elements()
        batch_size = elms // n_in_elems
        assert batch_size * n_in_elems == elms

        desired_shape = [batch_size, n_in_elems]
        if inp.shape != desired_shape:
            # mismatch, insert a reshape to fix this.
            op.set_input_tensor(create_reshape_tensor(inp, desired_shape), 0)

    return op


def convert_batched_fc_to_conv(op, arch, nng):
    if op.type == Op.FullyConnected:
        ifm = op.inputs[0]
        ofm = op.outputs[0]
        # Check if the FC is 2D and first dimension indicates batching
        if len(ifm.shape) == len(ofm.shape) == 2 and ifm.shape[0] != 1:
            n = ifm.shape[0]
            batching_split = {4: (2, 2), 8: (2, 4), 16: (4, 4)}
            h, w = batching_split.get(n, (1, n))

            # Convert to convolution
            op.name += "_conv"
            op.type = Op.Conv2DBias
            op.attrs = {
                "dilation": (1, 1, 1, 1),
                "dilation_h_factor": 1,
                "dilation_w_factor": 1,
                "padding": b"SAME",
                "stride_h": 1,
                "stride_w": 1,
                "strides": (1, 1, 1, 1),
            }

            prev_op = ifm.ops[0]
            desired_shape = [1, h, w, ifm.shape[-1]]
            if len(ifm.consumer_list) == 1 and prev_op is not None and prev_op.type == Op.Reshape:
                # There is a preceding Reshape
                # Compare input of prev_op and input of op, to see if prev_op can be removed
                ifm_prev_op = prev_op.inputs[0]
                if ifm_prev_op.shape == ifm.shape and check_quantized_tens_scaling_equal(ifm_prev_op, ifm):
                    # prev_op can be removed
                    op.set_input_tensor(ifm_prev_op, 0)
                else:
                    op.inputs[0].set_all_shapes(desired_shape)
                    prev_op.set_input_tensor(
                        create_const_tensor(prev_op.inputs[1].name, [1], DataType.int32, desired_shape), 1
                    )
                    prev_op.attrs["new_shape"] = desired_shape
            else:
                # Add reshape op to the input if there is no preceding reshape
                ifm.consumer_list.remove(op)
                op.set_input_tensor(create_reshape_tensor(ifm, desired_shape), 0)

            # Reshape Weights to be 4D. IO becomes HWIO
            weight_tensor = op.inputs[1]
            weight_tensor.quant_values = np.expand_dims(np.expand_dims(weight_tensor.quant_values, axis=0), axis=0)
            weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))

            desired_shape = [1, h, w, ofm.shape[-1]]
            if (
                len(ofm.consumer_list) == 1
                and ofm.consumer_list[0] is not None
                and ofm.consumer_list[0].type == Op.Reshape
            ):
                # There is a subsequent Reshape
                # Compare desired shape and output of consumer op, to see if consumer op can be removed
                ofm_cons_op = ofm.consumer_list[0].outputs[0]
                if desired_shape == ofm_cons_op.shape and check_quantized_tens_scaling_equal(ofm, ofm_cons_op):
                    op.outputs[0] = ofm_cons_op
                    op.outputs[0].ops = [op]
                else:
                    op.outputs[0].set_all_shapes(desired_shape)
            else:
                # Add reshape op to the output
                op.set_output_tensor(create_reshape_tensor(ofm, desired_shape, False))
    return op
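
# Illustrative note (shapes assumed): a FullyConnected with ifm shape [8, 64] and ofm shape
# [8, 10] is rewritten above into a 1x1 Conv2DBias on a [1, 2, 4, 64] ifm producing a
# [1, 2, 4, 10] ofm, reusing or inserting Reshapes on either side to keep the surrounding
# graph 2D.  A batch size that is not in batching_split, e.g. 5, falls back to a
# [1, 1, 5, 64] layout.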


def fixup_pack_input(op, arch, nng):
    if op.type == Op.Pack:
        # Pack is also referred to as Stack
        # Requires the rewrite_concat function to be called on the op afterwards
        axis = int(op.attrs["axis"])
        desired_shape = op.inputs[0].shape[:axis] + [1] + op.inputs[0].shape[axis:]

        # Construct 1 shape tensor to be used by all inserted reshape ops
        new_shape_tens = create_const_tensor(op.name + "_reshape_shape", [1], DataType.int32, desired_shape)

        for idx, inp in enumerate(op.inputs):
            reshape_out = inp.clone("_reshaped")
            reshape_out.set_all_shapes(desired_shape)

            reshape_op = Operation(Op.Reshape, "{}{}_reshape".format(op.name, idx))
            reshape_op.attrs["new_shape"] = desired_shape
            reshape_op.inputs = [inp, new_shape_tens]
            reshape_op.set_output_tensor(reshape_out)

            op.inputs[idx] = reshape_out

        op.type = Op.PackReshaped

    return op


def unfuse_activation_function(op, arch, nng):
    if op.type == Op.ConcatTFLite and op.run_on_npu and op.activation is not None:
        act_op = Operation(op.activation, op.name + op.activation.name)
        op.activation = None
        out_tens = op.outputs[0]
        intermediate_tens = out_tens.clone("_act_intermediate")
        act_op.set_output_tensor(out_tens)
        act_op.add_input_tensor(intermediate_tens)
        op.set_output_tensor(intermediate_tens)

    return op


def fixup_unpack_output(tens, arch, nng):
    op = tens.ops[0]
    if op.run_on_npu and op.type in set((Op.Unpack, Op.StridedSlice)):
        # Unpack is also referred to as Unstack
        # Requires the rewrite_split function to be called on the op afterwards

        reshape_input_shape = tens.shape
        if op.type == Op.StridedSlice:
            new_axis_mask = op.attrs["new_axis_mask"]
            shrink_axis_mask = op.attrs["shrink_axis_mask"]
            ellipsis_mask = op.attrs["ellipsis_mask"]

            if (new_axis_mask != 0 and shrink_axis_mask != 0) or ellipsis_mask != 0:
                # Not supported, will be put on CPU
                return tens
            if shrink_axis_mask == 0 and new_axis_mask == 0:
                # Equal Rank StridedSlice, no need to insert reshape
                return tens
            elif shrink_axis_mask != 0:
                n = 0
                axis = 0
                while shrink_axis_mask:
                    prev_mask = shrink_axis_mask
                    n += 1
                    shrink_axis_mask &= shrink_axis_mask - 1
                    axis = int(math.log2(prev_mask - shrink_axis_mask))
                    reshape_input_shape = reshape_input_shape[:axis] + [1] + reshape_input_shape[axis:]

                assert len(tens.shape) == (len(op.inputs[0].shape) - n)
                op.attrs["shrink_axis_mask"] = 0

            elif new_axis_mask != 0:
                n = 0
                axis = 0
                while new_axis_mask:
                    prev_mask = new_axis_mask
                    n += 1
                    new_axis_mask &= new_axis_mask - 1
                    axis = int(math.log2(prev_mask - new_axis_mask))
                    reshape_input_shape = reshape_input_shape[:axis] + reshape_input_shape[(axis + 1) :]
                    new_axis_mask >>= 1

                assert len(tens.shape) == (len(op.inputs[0].shape) + n)
                op.attrs["new_axis_mask"] = 0
        else:
            axis = int(op.attrs["axis"])
            op.type = Op.UnpackReshaped
            reshape_input_shape = tens.shape[:axis] + [1] + tens.shape[axis:]

        # Construct 1 shape tensor to be used by all inserted reshape ops
        new_shape_tens = create_const_tensor(op.name + "_reshape_shape", [1], DataType.int32, tens.shape)

        for idx, out_tens in enumerate(op.outputs):
            reshape_in = out_tens.clone("_reshaped")
            reshape_in.set_all_shapes(reshape_input_shape)
            reshape_in.ops = [op]

            reshape_op = Operation(Op.Reshape, "{}{}_reshape".format(op.name, idx))
            reshape_op.attrs["new_shape"] = reshape_input_shape
            reshape_op.inputs = [reshape_in, new_shape_tens]
            reshape_op.set_output_tensor(out_tens)

            op.outputs[idx] = reshape_in

    return tens


def add_padding_fields(op, arch, nng):
    if op.run_on_npu:
        if "padding" in op.attrs:
            if op.type.is_conv2d_op() or op.type.is_depthwise_conv2d_op():
                kernel_size = op.inputs[1].shape[:2]
                input_shape = op.inputs[0].shape
            elif op.type.is_pool_op() or op.type.npu_block_type == NpuBlockType.ReduceSum:
                kernel_size = op.attrs["ksize"][1:3]
                input_shape = op.inputs[0].shape
            else:
                raise UnsupportedFeatureError("Unknown operation that uses padding: {}".format(op.type))

            if op.type == Op.Conv2DBackpropInputSwitchedBias:
                upscaling_factor = op.outputs[0].shape[1] // input_shape[1]
                padding, skirt = calc_upscaled_padding_and_skirt(
                    op.attrs["padding"], kernel_size, op.attrs["strides"], input_shape, upscaling_factor
                )
            else:
                dilation_h, dilation_w = op.get_dilation_h_w()
                dilated_kernel_size = [dilation_h * (kernel_size[0] - 1) + 1, dilation_w * (kernel_size[1] - 1) + 1]
                padding, skirt = calc_padding_and_skirt(
                    op.attrs["padding"], dilated_kernel_size, op.attrs["strides"], input_shape
                )

            op.attrs["explicit_padding"] = padding
            op.attrs["skirt"] = skirt

    return op


# Check if the op can be reordered
def get_prepend_op(op):
    inp = op.inputs[0]
    # The op should be reordered between prev_op and prep_op
    prev_op = inp.ops[-1]
    prep_op = None
    while prev_op.type in memory_only_ops and len(prev_op.outputs) == 1 and len(prev_op.outputs[0].consumers()) == 1:
        prep_op = prev_op
        inp = prev_op.inputs[0]
        prev_op = inp.ops[-1]
    if prev_op is not None and len(prev_op.outputs) == 1 and len(prev_op.outputs[0].consumers()) == 1:
        return prep_op

    return None


def convert_depthwise_to_conv(op, arch, nng):
    # Depthwise is equivalent to a single conv2d if the ifm depth is 1 and
    # the ofm depth equals the depth multiplier.
    # If those conditions are true, then we can perform a simple
    # switch of the operator type (and weight order)

    if op.type == Op.DepthwiseConv2DBias and (op.attrs["depth_multiplier"] != 1):
        ifm_tensor = op.inputs[0]
        weight_tensor = op.inputs[1]
        ofm_tensor = op.outputs[0]
        if (ifm_tensor.shape[3] == 1) and (ofm_tensor.shape[3] == op.attrs["depth_multiplier"]):
            # Change op type to Conv2d
            op.type = Op.Conv2DBias
            del op.attrs["channel_multiplier"]
            del op.attrs["depth_multiplier"]

            weight_tensor.quant_values = np.transpose(weight_tensor.quant_values, (0, 1, 3, 2))
            weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))
        else:
            raise UnsupportedFeatureError(
                "Unsupported DepthwiseConv2d with depth_multiplier = {}, ifm channels = {}, ofm channels = {}".format(
                    op.attrs["depth_multiplier"], ifm_tensor.shape[3], ofm_tensor.shape[3]
                )
            )
    return op


def reorder_depthwise_weights(op, arch, nng):
    if op.type.is_depthwise_conv2d_op():
        weight_tensor = op.inputs[1]
        weight_tensor.quant_values = np.transpose(weight_tensor.quant_values, (0, 1, 3, 2))
        weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))
        weight_tensor.weight_transpose_depthwise = True

    return op


def convert_conv_to_fc(op, arch, nng):
    # Conv 1x1 can be equivalent to Fully Connected.
    # By representing certain convs as fully connected layers, Vela can better determine whether or not to use
    # caching/double buffering for the weights.
    # (Weights don't need to be reloaded for convs when IFM H and W are 1)
    if op.type == Op.Conv2DBias:
        _, h, w, _ = op.inputs[0].shape
        kh, kw, _, _ = op.inputs[1].shape
        if h == 1 and w == 1 and kh == 1 and kw == 1:
            # Overwrite this op as a Fully Connected Op
            op.name += "_fc"
            op.type = Op.FullyConnected
            op.attrs = {
                "weights_format": 0,
            }
            # Reshape Weights to be 2D. HWIO becomes just IO (as H and W are 1, they can just be dropped)
            weight_tensor = op.inputs[1]
            weight_tensor.quant_values = weight_tensor.quant_values.squeeze(axis=(0, 1))
            weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))
            # The output from a fully connected is expected to be 2D so we need to add a reshape layer to convert it
            # back to 4D afterwards as the next layer is expecting that shape
            orig_ofm_tensor = op.outputs[0]
            # Reshape this op's output to be 2D: {(N*H*W), C} (We know N, H and W are all 1 so this becomes {1, C})
            fc_ofm_tensor = orig_ofm_tensor.clone("_fc")
            fc_ofm_tensor.set_all_shapes([1, fc_ofm_tensor.shape[-1]])
            fc_ofm_tensor.ops = [op]
            # Add a reshape after the new OFM to convert it back to the original 4D shape
            reshape_name = op.name + "_reshape"
            new_shape_tens = create_const_tensor(reshape_name + "_shape", [1], DataType.int32, orig_ofm_tensor.shape)
            reshape_op = Operation(Op.Reshape, reshape_name)
            reshape_op.attrs["new_shape"] = orig_ofm_tensor.shape
            reshape_op.inputs = [fc_ofm_tensor, new_shape_tens]
            reshape_op.set_output_tensor(orig_ofm_tensor)
            # Replace this op's OFM to point to the 2D tensor
            op.outputs[0] = fc_ofm_tensor
    return op


def fixup_relus_with_differing_ifm_ofm_scaling(op, arch, nng):
    if op.run_on_npu and op.type.is_relu_op():
        ifm = op.inputs[0]
        ofm = op.outputs[0]
        # Relu with differing IFM and OFM scaling cannot be fused with another primary op
        # and requires its own to be inserted
        if not check_quantized_tens_scaling_equal(ifm, ofm):
            # Override this op with its own primary op (avgpool)
            relu_fused_op = create_avgpool_nop(op.name + "_avgpool")
            # And fuse the original activation function to it
            relu_fused_op.activation = op.type
            # Tidy up and assign the ifm and ofm to the new op
            ifm.consumer_list.remove(op)

            # if not 4d, reshape ifm/ofm
            if len(ifm.shape) < 4:
                ifm_shaped = create_reshape_tensor(ifm, full_shape(4, ifm.shape, 1))
                ifm = ifm_shaped
            if len(ofm.shape) < 4:
                ofm_shaped = create_reshape_tensor(ofm, full_shape(4, ofm.shape, 1), False)
                ofm = ofm_shaped

            relu_fused_op.add_input_tensor(ifm)
            relu_fused_op.set_output_tensor(ofm)
            op = relu_fused_op
    return op


# Reorder activation op if it's after the memory only operations
def fixup_act_reorder(op, arch, nng):
    if op.type.is_relu_op() or op.type in set((Op.Sigmoid, Op.Tanh)):
        prep_op = get_prepend_op(op)
        if prep_op is not None:
            act_op = op.clone("_reordered")

            # There is only one input tensor, overwrite it
            act_op.set_input_tensor(prep_op.inputs[0], 0)

            act_op_out = act_op.inputs[0].clone("_acted")
            act_op_out.quantization = op.outputs[0].quantization.clone()
            act_op.set_output_tensor(act_op_out)

            # Update the consumer list
            act_op_out.consumer_list = op.outputs[0].consumer_list.copy()
            act_op_out.consumer_list.append(prep_op)

            prep_op.inputs[0] = act_op_out
            prep_op.outputs[0].quantization = act_op_out.quantization.clone()

            # Mark the op so that it will be removed as passthrough later on
            op.type = Op.Identity
    return op


def fixup_elementwise_with_scalars(op, arch, nng):
    if op.type.is_binary_elementwise_op():
        ifm_tensor, ifm2_tensor, _, _ = op.get_ifm_ifm2_weights_ofm()
        if ifm2_tensor.shape != [] and ifm_tensor.shape != []:
            diff = len(ifm_tensor.shape) - len(ifm2_tensor.shape)
            if diff > 0:
                ifm2_tensor.shape = full_shape(len(ifm_tensor.shape), ifm2_tensor.shape, 1)
            elif diff < 0:
                ifm_tensor.shape = full_shape(len(ifm2_tensor.shape), ifm_tensor.shape, 1)
        elif ifm_tensor.shape == [] and ifm_tensor.quant_values is None:
            # IFM is marked as a scalar, but is a result of an operation; change it to a shape of size 1
            ifm_tensor.shape = len(ifm2_tensor.shape) * [1]
            ifm_tensor.storage_shape = ifm_tensor.shape
        elif ifm2_tensor.shape == [] and ifm2_tensor.quant_values is None:
            # IFM2 is marked as a scalar, but is a result of an operation; change it to a shape of size 1
            ifm2_tensor.shape = len(ifm_tensor.shape) * [1]
            ifm2_tensor.storage_shape = ifm2_tensor.shape
    return op
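
# Illustrative note (shapes assumed): for an Add with ifm shape [1, 8, 8, 16] and ifm2
# shape [16], the rank difference is 3, so ifm2 is padded with leading ones to
# [1, 1, 1, 16] via full_shape.  A scalar ifm2 (shape []) that is produced by another op,
# rather than being a constant, instead gets the shape [1, 1, 1, 1].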


# Set input/output tensor equivalence to the same id for memory operations
def set_tensor_equivalence(op, arch, nng):
    if op.type in memory_only_ops:
        eid = op.outputs[0].equivalence_id
        for inp in op.inputs:
            inp.equivalence_id = eid
    return op


def convert_softmax(op, arch, nng):
    if op.type == Op.Softmax and op.run_on_npu:
        softmax = SoftMax(op)
        op = softmax.get_graph()
    return op


def convert_mul_max_to_abs_or_lrelu(op, arch, nng):
    r"""Whenever there is a subgraph with this topology:

       Input    X   For X = -1 or X > 0
       |   \   /    This subgraph can be replaced with either
       |    Mul     an Abs (if X = -1) or a LeakyReLU (if X > 0)
       |   /
       Max
    """

    if op.type == Op.Maximum:
        # finds the Mul input(s) to the Max
        muls = [i for i in op.inputs if i.ops[0].type == Op.Mul]
        if len(muls) == 1:
            mul = muls[0].ops[0]
        elif len(muls) == 2:
            # In the case both inputs are Muls, find the one with the same input as the Max
            mul = [m for m in muls if len(set(op.inputs + m.ops[0].inputs)) == 1][0].ops[0]
        else:
            # No Mul inputs
            return op

        # make sure the Mul doesn't have any other consumers
        mul_ofm = mul.outputs[0]
        if len(mul_ofm.consumers()) != 1:
            return op
        # make sure the Mul doesn't have a fused activation function
        if mul.activation:
            return op
        ifm, ofm = op.get_ifm_ofm()
        if ifm is None or ofm is None:
            return op

        if ifm.dtype not in (DataType.uint8, DataType.int8) or ifm.dtype != ofm.dtype:
            return op
        if not check_quantized_tens_scaling_equal(ifm, ofm) or not check_quantized_tens_scaling_equal(ifm, mul_ofm):
            # rewrite to LeakyRelu currently only makes sense if the quantization is identical
            return op

        # finds the branched input that goes to both the Max and the Mul
        shared = set(op.inputs) & set(mul.inputs)
        if len(shared) == 1:
            shared_in = shared.pop()
            # find the constant scalar input to the Mul
            const_tens = (set(mul.inputs) - {shared_in}).pop()
            # check that it is a scalar
            if const_tens.shape != []:
                return op
            const = const_tens.ops[0]
            # check that it is a constant
            if const.type != Op.Const:
                return op
            # Remove the Mul from the shared input's consumers
            shared_in.consumer_list.remove(mul)
        else:
            return op

        val = const.outputs[0].values
        if val >= 0:
            new_op = Op.LeakyRelu
            op.attrs["alpha"] = val
            # to produce bit exact results, the alpha is not enough;
            # save additional scaling info in attr "alpha_scaling", to be used as input
            # to the LUT construction
            alpha_scalar = const_tens.quant_values - const_tens.quantization.zero_point
            mul_ifm_scale = np.double(ifm.quantization.scale_f32)
            mul_ifm2_scale = np.double(const_tens.quantization.scale_f32)
            mul_ofm_scale = np.double(mul_ofm.quantization.scale_f32)
            alpha_scale, alpha_shift = scaling.elementwise_mul_scale(mul_ifm_scale, mul_ifm2_scale, mul_ofm_scale)
            op.attrs["alpha_scaling"] = (alpha_scalar, alpha_scale, alpha_shift)
        elif val == -1:
            new_op = Op.Abs
        else:
            return op

        op.type = new_op
        op.name = op.name.replace("Maximum", new_op.name)
        op.outputs[0].name = op.outputs[0].name.replace("Maximum", new_op.name)
        op.inputs = [shared_in]
    return op


def convert_lrelu_to_mul_max(op, arch):
    # Converts LeakyRelu to Max(alpha * IFM, identity * IFM)
    # (the opposite of convert_mul_max_to_abs_or_lrelu)
    ifm, ofm = op.get_ifm_ofm()
    if ifm is None or ofm is None:
        return op

    # Add multiplication with alpha
    mul_alpha = Operation(Op.Mul, op.name + "_mul_alpha")
    mul_alpha.add_input_tensor(ifm)
    # Create const tensor containing alpha as scalar
    alpha = op.attrs["alpha"]
    quantization = ifm.quantization.clone()
    quantization.min = 0
    quantization.max = alpha * (quantization.quant_max - quantization.quant_min)
    quantization.scale_f32 = alpha
    quantization.zero_point = 0
    alpha_tens = create_const_tensor(op.name + "_alpha_scalar", [], ifm.dtype, [1], np.int8, quantization=quantization)
    mul_alpha.add_input_tensor(alpha_tens)
    fm_alpha = ofm.clone(op.name + "_alpha")
    mul_alpha.set_output_tensor(fm_alpha)

    if check_quantized_tens_scaling_equal(ifm, ofm):
        # No identity multiplication is needed
        fm_id = ifm
    else:
        # Add multiplication with identity
        mul_identity = Operation(Op.Mul, op.name + "_mul_identity")
        mul_identity.add_input_tensor(ifm)
        # Create const tensor containing identity as scalar
        quantization = ifm.quantization.clone()
        quantization.min = 0
        quantization.max = quantization.quant_max - quantization.quant_min
        quantization.scale_f32 = 1
        quantization.zero_point = 0
        identity_tens = create_const_tensor(
            op.name + "_id_scalar", [], ifm.dtype, [1], np.uint8, quantization=quantization
        )
        mul_identity.add_input_tensor(identity_tens)
        fm_id = ofm.clone(op.name + "_id")
        mul_identity.set_output_tensor(fm_id)

    # Convert LeakyRelu to Max, add the results of the multiplication(s) as inputs
    op.type = Op.Maximum
    op.name = op.name.replace("LeakyRelu", "Maximum")
    op.inputs = []
    ifm.consumer_list.remove(op)
    op.add_input_tensor(fm_alpha)
    op.add_input_tensor(fm_id)
    return op


def convert_to_lut(op, lut_values, lut_name):
    # Rewrite the operation by Add with scalar 0 + LUT activation
    ifm = op.inputs[0]
    if ifm is None:
        return op
    assert ifm.dtype.size_in_bytes() == 1
    op.type = Op.Add
    op.name = op.name + "_lut_" + lut_name
    # Mark as no-op to enable potential fusing optimizations
    op.attrs["is_nop"] = True
    # Create an input tensor containing scalar zero
    quantization = QuantizationParameters(0.0, 255.0)
    quantization.scale_f32 = ifm.quantization.scale_f32
    quantization.zero_point = 0
    tens = create_const_tensor(op.inputs[0].name + "_scalar0", [], ifm.dtype, [0], np.uint8, quantization=quantization)
    op.add_input_tensor(tens)
    # The LUT must be applied without any preceding rescaling (the LUT itself performs the rescale),
    # so even if the OFM has a different scale than the IFM, the generated OFM scale instructions
    # should be the same as the IFM
    op.forced_output_quantization = ifm.quantization
    lut_tensor = lut.create_lut_tensor(op.name + "_values", lut_values, DataType.int8)
    op.set_activation_lut(lut_tensor)
    return op


def convert_to_lut8(op, fn, fn_name):
    # Converts op to a no-op + int8/uint8 LUT which is generated with the given function.
    # fn is a function(real) -> real
    ifm, ofm = op.get_ifm_ofm()
    if ifm.dtype not in (DataType.uint8, DataType.int8) or ifm.dtype != ofm.dtype:
        return op
    # Generate the LUT
    ifm_scale = np.double(ifm.quantization.scale_f32)
    ofm_scale = np.double(ofm.quantization.scale_f32)
    zp_in = ifm.quantization.zero_point
    zp_out = ofm.quantization.zero_point
    values = []
    ix = range(256) if ifm.dtype == DataType.uint8 else range(-128, 128)
    quantized_min = min(ix)
    quantized_max = max(ix)
    for x in ix:
        x_real = ifm_scale * (x - zp_in)
        y_real = fn(x_real)
        lut_result = round_away_zero(zp_out + y_real / ofm_scale)
        lut_result = min(quantized_max, max(quantized_min, lut_result))
        values.append(lut_result)
    return convert_to_lut(op, values, fn_name)
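
# Worked example (quantization values assumed for illustration): for a uint8 Sigmoid with
# ifm scale 0.1, ifm zero point 128, ofm scale 1/256 and ofm zero point 0, the table entry
# for x = 138 is round_away_zero(0 + sigmoid(0.1 * (138 - 128)) * 256) = round_away_zero(187.2)
# = 187, clamped to the [0, 255] uint8 range.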


def convert_lrelu_to_lut(op, arch):
    ifm, ofm = op.get_ifm_ofm()
    # Generate the LUT
    alpha = op.attrs["alpha"]
    ifm_scale = np.double(ifm.quantization.scale_f32)
    ofm_scale = np.double(ofm.quantization.scale_f32)
    zp_in = ifm.quantization.zero_point
    zp_out = ofm.quantization.zero_point
    identity_scale, identity_shift = scaling.elementwise_mul_scale(ifm_scale, 1, ofm_scale)
    alpha_scalar = 1
    alpha_scale, alpha_shift = scaling.elementwise_mul_scale(ifm_scale, alpha, ofm_scale)
    if "alpha_scaling" in op.attrs:
        # The LeakyRelu was the result from convert_mul_max_to_abs_or_lrelu
        alpha_scalar, alpha_scale, alpha_shift = op.attrs["alpha_scaling"]
    values = []
    ix = range(256) if ifm.dtype == DataType.uint8 else range(-128, 128)
    quantized_min = min(ix)
    quantized_max = max(ix)
    for x in ix:
        if x < zp_in:
            lut_result = zp_out + fp_math.multiply_by_quantized_multiplier(
                alpha_scalar * (x - zp_in), alpha_scale, alpha_shift
            )
        else:
            lut_result = zp_out + fp_math.multiply_by_quantized_multiplier(x - zp_in, identity_scale, identity_shift)
        lut_result = min(quantized_max, max(quantized_min, lut_result))
        values.append(lut_result)
    return convert_to_lut(op, values, "lrelu")


def convert_lrelu(op, arch, nng):
    # Converts LeakyRelu to a LUT based solution if possible, otherwise a mul + max
    if op.type != Op.LeakyRelu:
        return op
    ifm, ofm = op.get_ifm_ofm()
    if ifm is None or ofm is None:
        return op
    if ifm.dtype in (DataType.uint8, DataType.int8) and ifm.dtype == ofm.dtype:
        # use LUT for int8/uint8
        return convert_lrelu_to_lut(op, arch)
    if check_quantized_tens_scaling_equal(ifm, ofm) and ifm.dtype == ofm.dtype == DataType.int16:
        # use LeakyRelu unmodified for int16 with equal input/output scaling
        return op
    return convert_lrelu_to_mul_max(op, arch)


def convert_tanh_sigmoid_to_lut(op, arch, nng):
    # Converts int8/uint8 Sigmoid and Tanh to a LUT based solution
    if op.type == Op.Sigmoid:
        return convert_to_lut8(op, clamp_sigmoid, "sigmoid")
    elif op.type == Op.Tanh:
        return convert_to_lut8(op, math.tanh, "tanh")
    return op


def remove_unwanted_reshapes(op, arch, nng):
    # Try to remove reshapes enclosing an ElementWise operator with only one non-constant input
    if not op.run_on_npu or not op.type.is_elementwise_op():
        return op

    # Check if the ElementWise operator only has one non-constant input
    non_const_tens = [x for x in op.inputs if x.ops[0].type != Op.Const]
    if len(non_const_tens) != 1:
        return op
    ifm = non_const_tens[0]

    # Check if the operation is enclosed by Reshapes that can be removed
    ofm = op.outputs[0]
    prev_op = ifm.ops[0]
    if (
        len(ifm.consumer_list) == 1
        and prev_op.type == Op.Reshape
        and len(ofm.consumer_list) == 1
        and ofm.consumer_list[0].type == Op.Reshape
    ):
        # Operation is enclosed by reshapes, check if they can be removed
        prev_op_ifm, prev_op_ofm = prev_op.get_ifm_ofm()
        cons_op = ofm.consumer_list[0]
        cons_op_ifm = ofm
        cons_op_ofm = cons_op.outputs[0]
        if len(prev_op_ifm.shape) == len(cons_op_ofm.shape):
            # Check if quantization is the same in the input and output for the reshape ops
            if check_quantized_tens_scaling_equal(prev_op_ifm, prev_op_ofm) and check_quantized_tens_scaling_equal(
                cons_op_ifm, cons_op_ofm
            ):
                op.set_input_tensor(prev_op_ifm, 0)
                op.set_output_tensor(cons_op_ofm)
    return op


def fuse_activation_function_with_prev(op, arch, nng):
    # if op is a no-op: attempts to move the activation function to the preceding op
    if not op.attrs.get("is_nop", False) or op.activation is None:
        return op
    ifm, ofm = op.get_ifm_ofm()
    if ifm is None or ofm is None:
        return op
    # finds the input(s) to the operation
    prev_op = ifm.ops[0]
    # Note: the below checks on prev_op require that a first optimize pass on the full graph has been performed
    fuse = (
        prev_op.run_on_npu
        and prev_op.type.npu_block_type != NpuBlockType.Default
        and len(ifm.ops) == 1
        and len(prev_op.outputs[0].consumers()) == 1
        and prev_op.activation is None
    )
    if op.activation_lut is not None and arch.shram_reserved_unused_banks == 0:
        # TODO: if SHRAM LUT space is shared with SHRAM ACC (32, 64 MAC),
        # LUT currently only works correctly for elementwise ops
        fuse = False
    if not fuse:
        return op
    # Move the fused activation function + corresponding info to prev_op
    prev_op.activation = op.activation
    prev_op.forced_output_quantization = op.forced_output_quantization
    if op.activation_lut is not None:
        prev_op.set_activation_lut(op.activation_lut)
    # Bypass op
    prev_op.set_output_tensor(ofm)
    return op


def add_attrs_to_resizebilinear(op, arch, nng):
    if op.type == Op.ResizeBilinear and op.run_on_npu:
        input_tensor = op.inputs[0]
        upscaled_shape = [input_tensor.shape[1] * 2, input_tensor.shape[2] * 2]
        out_shape = op.outputs[0].shape[1:3]
        if not op.attrs["align_corners"] and out_shape == upscaled_shape:
            # this means the output is supposed to be a x2 upscale,
            # so we need to do SAME padding
            op.attrs["padding"] = b"SAME"
        elif op.attrs["align_corners"] and out_shape == [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
            # here we can just run the avg pool without padding and
            # produce a (M * 2 - 1, N * 2 - 1) sized output
            op.attrs["padding"] = b"VALID"
        else:
            return op
        input_tensor.resampling_mode = resampling_mode.NEAREST
        op.attrs.update({"strides": (1, 1, 1, 1), "ksize": (1, 2, 2, 1)})
    return op


def fixup_bias_tensors(op, arch, nng):
    if op.type.needs_bias() and op.bias is None:
        # Op has no bias, add bias tensor filled with zeros
        nr_biases = op.inputs[1].shape[-1]
        bias_values = [0] * nr_biases
        bias_tensor = create_const_tensor(op.name + "_bias", [nr_biases], DataType.int32, bias_values)
        bias_tensor.quant_values = bias_tensor.values
        op.set_input_tensor(bias_tensor, -1)

    return op


def supported_operator_check(op, arch, nng):
    op.run_on_npu = arch.supported_operators.is_operator_supported(op)
    return op


def optimise_graph_a(nng, arch, verbose_graph=False):
    if verbose_graph:
        nng.print_graph()

    op_rewrite_list = [
        set_tensor_equivalence,
        supported_operator_check,
        # then do any rewrites of supported operators
        convert_depthwise_to_conv,
        convert_conv_to_fc,
        convert_softmax,
        fixup_fully_connected_input,
        convert_batched_fc_to_conv,
        fixup_pack_input,
        unfuse_activation_function,
        fixup_conv2d_backprop,
        fixup_relus_with_differing_ifm_ofm_scaling,
        fixup_act_reorder,
        fixup_elementwise_with_scalars,
        reorder_depthwise_weights,
        fixup_resizebilinear,
        fixup_bias_tensors,
        convert_nop_split_to_identity,
        convert_mul_max_to_abs_or_lrelu,
        remove_unwanted_reshapes,
        convert_lrelu,
        convert_tanh_sigmoid_to_lut,
    ]

    for idx, sg in enumerate(nng.subgraphs):
        # rewrite graph pass
        nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order(
            nng, sg, arch, [], op_rewrite_list, rewrite_unsupported=False,
        )

    for idx, sg in enumerate(nng.subgraphs):
        # remove passthrough tensors and attempt further optimizations
        nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order(
            nng, sg, arch, [remove_passthrough_tensor], [fuse_activation_function_with_prev, add_padding_fields]
        )

    if verbose_graph:
        nng.print_graph()
    return nng


def optimise_graph_b(nng, arch, verbose_graph=False):
    if verbose_graph:
        nng.print_graph()

    for idx, sg in enumerate(nng.subgraphs):
        # combined rewrite graph pass
        nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order(
            nng, sg, arch, [fixup_unpack_output, rewrite_concat, rewrite_split], []
        )

    if verbose_graph:
        nng.print_graph()
    return nng