# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Early optimisation of the network graph, using the rewrite_graph module to do the traversal of the graph. These are
# split into two parts optimise_graph_a and optimise_graph_b.
import math

import numpy as np

from . import fp_math
from . import lut
from . import rewrite_graph
from . import scaling
from .data_type import DataType
from .debug_database import DebugDatabase
from .errors import UnsupportedFeatureError
from .ethos_u55_regs.ethos_u55_regs import resampling_mode
from .numeric_util import clamp_sigmoid
from .numeric_util import full_shape
from .numeric_util import round_away_zero
from .operation import create_activation_function
from .operation import NpuBlockType
from .operation import Op
from .operation import Operation
from .operation import Padding
from .operation_util import create_avgpool_nop
from .shape4d import Shape4D
from .softmax import SoftMax
from .tensor import check_quantized_tens_scaling_equal
from .tensor import create_const_tensor
from .tensor import create_reshape_tensor
from .tensor import QuantizationParameters
from .tensor import Tensor
from .tflite_mapping import optype_to_builtintype

passthrough_nodes = (Op.Identity,)

memory_only_ops = (Op.Reshape,)


def remove_passthrough_tensor(tens, arch, nng):
    if len(tens.ops) == 1 and tens.ops[0].type in passthrough_nodes:
        assert len(tens.ops[0].inputs) == 1
        tens = tens.ops[0].inputs[0]
    return tens


def rewrite_concat(tens, arch, nng):
    if len(tens.ops) == 1 and tens.ops[0].type.is_concat_op():
        concat_op = tens.ops[0]
        if tens != concat_op.outputs[0]:
            return tens  # don't attempt to rewrite the min/max outputs of QuantizedConcat

        # Not supported so leave it and run on CPU
        if not concat_op.run_on_npu:
            return tens

        inputs, axis = concat_op.get_concat_inputs_axis()

        tens.ops = []
        offset = 0
        for idx, inp in enumerate(inputs):
            new_op = Operation(Op.ConcatSliceWrite, concat_op.name + str(idx))
            new_op.inputs = [inp]
            new_op.outputs = [tens]
            new_op.attrs["concat_axis"] = axis + (4 - len(inp.shape))
            new_op.attrs["concat_start"] = offset
            offset += inp.shape[axis]
            new_op.attrs["concat_end"] = offset
            new_op.run_on_npu = True
            tens.ops.append(new_op)
            DebugDatabase.add_optimised(concat_op, new_op)
            new_op.set_ifm_ofm_shapes()
        assert tens.shape[axis] == offset

        # If axis corresponds to C-dimension, NHCWB16 can only be used in the output if all the concat_start's are a
        # multiple of 16. This is because only then will the address offset for the OFM, for all operations, be 16 byte
        # aligned. For other values of axis the address offsets will be 16 byte aligned, as they are all based on c = 0
        # and those addresses are always 16 byte aligned due to the NHCWB16 format.
        if axis == -1 or axis == (len(tens.shape) - 1):
            for op in tens.ops:
                if op.attrs["concat_start"] % 16 != 0:
                    tens.avoid_NHCWB16 = True
                    break

    return tens
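
# Illustrative note on rewrite_concat above (example shapes only): concatenating tensors of
# shape [1, 8, 8, 16] and [1, 8, 8, 24] along the channel axis produces two ConcatSliceWrite
# ops with (concat_start, concat_end) of (0, 16) and (16, 40); since every concat_start is a
# multiple of 16, the output is still allowed to use the NHCWB16 format.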


def rewrite_split(tens, arch, nng):

    if len(tens.ops) == 1 and tens.ops[0].type.is_split_op():
        split_op = tens.ops[0]

        # Not supported so leave it and run on CPU
        if not split_op.run_on_npu:
            return tens

        inp, outputs, axis, offset_start, offset_end = split_op.get_split_inputs_axis()

        tens.ops = []
        new_op = Operation(Op.SplitSliceRead, split_op.name)
        new_op.inputs = [inp]

        # For Split the offset cannot be extracted from the tensor so it has to
        # be calculated from the index of the output tensor
        if axis is not None:
            # Get the start and end of the split
            offset_start = [0] * 4
            for idx, out in enumerate(outputs):
                if out == tens:
                    break
                axis_4D = axis + (4 - len(out.shape))

                offset_start[axis_4D] += split_op.ofm_shapes[idx].get_dim(axis_4D)

            # If start offset is not a multiple of 16 in the C-dimension, NHCWB16 needs to be avoided in the input
            if (offset_start[-1] % 16) != 0:
                inp.avoid_NHCWB16 = True
        else:
            offset_start = full_shape(4, offset_start, 0)

        new_op.attrs["split_start"] = offset_start
        new_op.run_on_npu = True
        new_op.set_output_tensor(tens)
        new_op.set_ifm_ofm_shapes()
        DebugDatabase.add_optimised(split_op, new_op)

    return tens


def needed_total_padding(input_size, stride, filter_size):
    out_size = (input_size + stride - 1) // stride
    needed_input = (out_size - 1) * stride + filter_size
    total_padding = max(0, needed_input - input_size)
    return total_padding
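
# Worked example for needed_total_padding (illustrative numbers only): input_size=224,
# stride=2, filter_size=3 gives out_size = (224 + 1) // 2 = 112, needed_input = 111 * 2 + 3 = 225
# and therefore total_padding = max(0, 225 - 224) = 1.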


def calc_padding_and_skirt(padding_type, kernel_size, stride, input_dims):
    ypad = needed_total_padding(int(input_dims[1]), int(stride[1]), int(kernel_size[0]))
    xpad = needed_total_padding(int(input_dims[2]), int(stride[2]), int(kernel_size[1]))
    if padding_type == Padding.SAME:
        left_pad = (xpad + 0) // 2
        right_pad = (xpad + 1) // 2
        top_pad = (ypad + 0) // 2
        bottom_pad = (ypad + 1) // 2
    elif padding_type == Padding.VALID:
        left_pad = 0
        right_pad = 0
        top_pad = 0
        bottom_pad = 0
    else:
        raise UnsupportedFeatureError(f"Unknown padding {padding_type}")
    padding = (top_pad, left_pad, bottom_pad, right_pad)
    skirt = (top_pad, left_pad, ypad - top_pad, xpad - left_pad)
    return padding, skirt
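
# Worked example for calc_padding_and_skirt (illustrative numbers only): a 3x3 kernel with
# stride 1 on a 224x224 input and Padding.SAME gives xpad = ypad = 2, so
# padding = (top, left, bottom, right) = (1, 1, 1, 1) and skirt = (1, 1, 2 - 1, 2 - 1) = (1, 1, 1, 1).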


def calc_upscaled_padding_and_skirt(padding_type, kernel_size, stride, input_dims, upscaling_factor):
    kernel_height, kernel_width = kernel_size[0], kernel_size[1]
    if padding_type == Padding.SAME:
        ypad = needed_total_padding(int(input_dims[1]) * upscaling_factor, int(stride[1]), int(kernel_height))
        xpad = needed_total_padding(int(input_dims[2]) * upscaling_factor, int(stride[2]), int(kernel_width))
        right_pad = max(((xpad + 1) // upscaling_factor) - 1, 0)
        bottom_pad = max(((ypad + 1) // upscaling_factor) - 1, 0)
        left_pad = max(kernel_width - 1 - right_pad, 0)
        top_pad = max(kernel_height - 1 - bottom_pad, 0)
    elif padding_type == Padding.VALID:
        right_pad = max(kernel_width - 2, 0)
        bottom_pad = max(kernel_height - 2, 0)
        left_pad = kernel_width - 1
        top_pad = kernel_height - 1
    else:
        raise UnsupportedFeatureError(f"Unknown padding {padding_type}")
    padding = (top_pad, left_pad, bottom_pad, right_pad)
    skirt = padding
    return padding, skirt


def fixup_conv2d_backprop(op, arch, nng):
    if op.type == Op.Conv2DBackpropInput:
        # flip the inputs
        op.inputs[0], op.inputs[2] = op.inputs[2], op.inputs[0]
        op.set_ifm_ofm_shapes()
        op.type = Op.Conv2DBackpropInputSwitchedBias

        # Update strides
        op.attrs.update({"stride_w": 1, "stride_h": 1, "strides": (1, 1, 1, 1)})

    return op


# Convert the op to an elementwise add
def convert_resizebilinear_1x1_to_add(op):
    op.type = Op.Add
    op.name = op.name + "_add"
    op.attrs["resizebilinear"] = True
    # Create an input tensor filled with zeros
    shape = op.outputs[0].shape
    tens = Tensor(shape, op.inputs[0].dtype, op.inputs[1].name + "_add")
    tens.values = np.zeros(shape)
    tens.quant_values = np.zeros(shape, np.uint8)
    tens.quantization = QuantizationParameters(0.0, 255.0)
    tens.quantization.scale_f32 = 1.0
    tens.quantization.zero_point = 0
    tens.consumer_list = [op]
    tens_op = op.inputs[1].ops[0]
    tens_op.set_output_tensor(tens)
    # Set the add inputs
    op.inputs[1] = op.inputs[0]
    op.inputs[0] = tens
    op.set_ifm_ofm_shapes()

    return op


# Convert ResizeBilinear to a number of 2x2 pool ops
def convert_resizebilinear_to_2x2_pool(op):
    count = 0
    pre_op = op
    outputs = op.outputs

    op.attrs.update({"strides": (1, 1, 1, 1), "ksize": (1, 2, 2, 1)})
    if op.attrs["align_corners"]:
        shape_modifier = 1
        op.attrs["padding"] = Padding.VALID
    else:
        shape_modifier = 0
        op.attrs["padding"] = Padding.SAME
    op.inputs[0].resampling_mode = resampling_mode.NEAREST

    upscaled_shape = np.array(op.inputs[0].shape[1:3])
    out_shape = np.array(op.outputs[0].shape[1:3])
    if (upscaled_shape == upscaled_shape * 2 - shape_modifier).all():
        return op

    while (upscaled_shape < out_shape).all():
        if count == 0:
            scaled_op = pre_op
        else:
            scaled_op = op.clone("_{}".format(count))
            scaled_op.inputs[0] = pre_op.outputs[0]

        upscaled_shape = upscaled_shape * 2 - shape_modifier

        if (upscaled_shape == out_shape).all():
            scaled_op.outputs = outputs
            scaled_op.outputs[0].ops = [scaled_op]
        else:
            shape = outputs[0].shape.copy()
            shape[1:3] = upscaled_shape[0:2]
            out_tens = Tensor(shape, DataType.int16, "{}_{}".format(op.outputs[0].name, count))
            out_tens.quantization = op.outputs[0].quantization.clone()
            out_tens.quantization.quant_min = np.iinfo(np.int16).min
            out_tens.quantization.quant_max = np.iinfo(np.int16).max
            scaled_op.set_output_tensor(out_tens)
        pre_op = scaled_op
        count += 1

        # Setup the scale value
        if scaled_op.inputs[0].dtype.bits == 8 and scaled_op.outputs[0].dtype.bits == 16:
            scaled_op.attrs["rescale"] = 128
        elif scaled_op.inputs[0].dtype.bits == 16 and scaled_op.outputs[0].dtype.bits == 8:
            scaled_op.attrs["rescale"] = 1 / 128
        elif "rescale" in scaled_op.attrs:
            del scaled_op.attrs["rescale"]
        scaled_op.set_ifm_ofm_shapes()

    return op


def fixup_resizebilinear(op, arch, nng):
    if op.type == Op.ResizeBilinear and op.run_on_npu:
        if op.inputs[0].shape == op.outputs[0].shape:
            # Bypass nop resizebilinear
            op.inputs = op.inputs[:1]
            op.type = Op.Identity
        elif op.inputs[0].shape[1] == 1 and op.inputs[0].shape[2] == 1:
            convert_resizebilinear_1x1_to_add(op)
        else:
            convert_resizebilinear_to_2x2_pool(op)

    return op


def convert_nop_split_to_identity(op, arch, nng):
    if op.type == Op.Split and op.attrs.get("num_splits") == 1:
        # the list comprehension should return a list with a single tensor
        # if it shouldn't, remove_passthrough_tensor will fail appropriately
        op.inputs = [i for i in op.inputs if i.shape == op.outputs[0].shape]
        op.type = Op.Identity
    return op


def fixup_fully_connected_input(op, arch, nng):
    if op.type == Op.FullyConnected:
        inp = op.inputs[0]
        weights = op.inputs[1]

        n_in_elems = weights.shape[-2]
        elms = inp.elements()
        batch_size = elms // n_in_elems
        assert batch_size * n_in_elems == elms

        desired_shape = [batch_size, n_in_elems]
        if inp.shape != desired_shape:
            # mismatch, insert a reshape to fix this.
            op.set_input_tensor(create_reshape_tensor(inp, desired_shape), 0)

    return op


def convert_batched_fc_shape(op, arch, nng):
    if op.type == Op.FullyConnected:
        ifm = op.inputs[0]
        ofm = op.outputs[0]
        # Check if the FC is 2D and first dimension indicates batching
        # TODO op.ifm_shape[0] > 1 is enough when refactoring is complete
        if len(ifm.shape) == len(ofm.shape) == 2 and ifm.shape[0] > 1 and op.ifm_shapes[0].batch > 1:
            n = ifm.shape[0]
            batching_split = {4: (2, 2), 8: (2, 4), 16: (4, 4)}
            h, w = batching_split.get(n, (1, n))

            prev_op = ifm.ops[0]
            desired_shape = [1, h, w, ifm.shape[-1]]
            op.ifm_shapes[0] = Shape4D(desired_shape)

            if len(ifm.consumer_list) == 1 and prev_op is not None and prev_op.type == Op.Reshape:
                # There is a preceding Reshape
                # Compare input of prev_op and input of op, to see if prev_op can be removed
                ifm_prev_op = prev_op.inputs[0]
                if ifm_prev_op.shape == ifm.shape and check_quantized_tens_scaling_equal(ifm_prev_op, ifm):
                    # prev_op can be removed
                    op.set_input_tensor(ifm_prev_op, 0)
                else:
                    op.inputs[0].set_all_shapes(desired_shape)
                    prev_op.set_input_tensor(
                        create_const_tensor(prev_op.inputs[1].name, [1], DataType.int32, desired_shape), 1
                    )
                    prev_op.attrs["new_shape"] = desired_shape
            else:
                # Add reshape op to the input if there is no preceding reshape
                ifm.consumer_list.remove(op)
                op.set_input_tensor(create_reshape_tensor(ifm, desired_shape), 0)

            # Reshape Weights to be 4D. IO becomes HWIO
            weight_tensor = op.inputs[1]
            weight_tensor.quant_values = np.expand_dims(np.expand_dims(weight_tensor.quant_values, axis=0), axis=0)
            weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))

            desired_shape = [1, h, w, ofm.shape[-1]]
            op.ofm_shapes[0] = Shape4D(desired_shape)

            if (
                len(ofm.consumer_list) == 1
                and ofm.consumer_list[0] is not None
                and ofm.consumer_list[0].type == Op.Reshape
            ):
                # There is a subsequent Reshape
                # Compare desired shape and output of consumer op, to see if consumer op can be removed
                ofm_cons_op = ofm.consumer_list[0].outputs[0]
                if desired_shape == ofm_cons_op.shape and check_quantized_tens_scaling_equal(ofm, ofm_cons_op):
                    op.outputs[0] = ofm_cons_op
                    op.outputs[0].ops = [op]
                else:
                    op.outputs[0].set_all_shapes(desired_shape)
            else:
                # Add reshape op to the output
                op.set_output_tensor(create_reshape_tensor(ofm, desired_shape, False))
    return op
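
# Note on convert_batched_fc_shape above (illustrative shapes): a 2D fully connected with
# batch N is remapped to a 4D (1, h, w, C) feature map so the batch can be handled as spatial
# positions, e.g. a batch of 8 with 40 input channels becomes shape [1, 2, 4, 40]; batch sizes
# not listed in batching_split fall back to (1, N).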


def fixup_pack_input(op, arch, nng):
    if op.type == Op.Pack:
        # Pack is also referred to as Stack
        # Requires the rewrite_concat function to be called on the op afterwards
        axis = int(op.attrs["axis"])
        desired_shape = op.inputs[0].shape[:axis] + [1] + op.inputs[0].shape[axis:]

        # Construct 1 shape tensor to be used by all inserted reshape ops
        new_shape_tens = create_const_tensor(op.name + "_reshape_shape", [1], DataType.int32, desired_shape)

        for idx, inp in enumerate(op.inputs):
            reshape_out = inp.clone("_reshaped")
            reshape_out.set_all_shapes(desired_shape)

            reshape_op = Operation(Op.Reshape, "{}{}_reshape".format(op.name, idx))
            reshape_op.attrs["new_shape"] = desired_shape
            reshape_op.inputs = [inp, new_shape_tens]
            reshape_op.set_output_tensor(reshape_out)
            reshape_op.set_ifm_ofm_shapes()
            DebugDatabase.add_optimised(op, reshape_op)

            op.inputs[idx] = reshape_out

        op.type = Op.PackReshaped

    return op


def unfuse_activation_function(op, arch, nng):
    if op.type == Op.ConcatTFLite and op.run_on_npu and op.activation is not None:
        act_op = Operation(op.activation.op_type, op.name + op.activation.op_type.name)
        op.activation = None
        out_tens = op.outputs[0]
        intermediate_tens = out_tens.clone("_act_intermediate")
        act_op.set_output_tensor(out_tens)
        act_op.add_input_tensor(intermediate_tens)
        op.set_output_tensor(intermediate_tens)
        act_op.set_ifm_ofm_shapes()

    return op


def fixup_stridedslice_output(tens, arch, nng):
    op = tens.ops[0]
    if op.run_on_npu and op.type == Op.StridedSlice:
        reshape_input_shape = tens.shape
        new_axis_mask = op.attrs["new_axis_mask"]
        shrink_axis_mask = op.attrs["shrink_axis_mask"]

        if shrink_axis_mask != 0:
            n = 0
            axis = 0
            while shrink_axis_mask:
                prev_mask = shrink_axis_mask
                n += 1
                shrink_axis_mask &= shrink_axis_mask - 1
                axis = int(math.log2(prev_mask - shrink_axis_mask))
                reshape_input_shape = reshape_input_shape[:axis] + [1] + reshape_input_shape[axis:]

            assert len(tens.shape) == (len(op.inputs[0].shape) - n)
            op.attrs["shrink_axis_mask"] = 0
        elif new_axis_mask != 0:
            n = 0
            axis = 0
            while new_axis_mask:
                prev_mask = new_axis_mask
                n += 1
                new_axis_mask &= new_axis_mask - 1
                axis = int(math.log2(prev_mask - new_axis_mask))
                reshape_input_shape = reshape_input_shape[:axis] + reshape_input_shape[(axis + 1) :]
                new_axis_mask >>= 1

            assert len(tens.shape) == (len(op.inputs[0].shape) + n)
            op.attrs["new_axis_mask"] = 0
        else:
            # Equal Rank StridedSlice, no need to insert reshape
            return tens

        # Construct 1 shape tensor to be used by all inserted reshape ops
        new_shape_tens = create_const_tensor(op.name + "_reshape_shape", [1], DataType.int32, tens.shape)

        for idx, out_tens in enumerate(op.outputs):
            op.ofm_shapes[idx] = Shape4D(new_shape_tens.shape)
            reshape_in = out_tens.clone("_reshaped")
            reshape_in.set_all_shapes(reshape_input_shape)
            reshape_in.ops = [op]

            reshape_op = Operation(Op.Reshape, "{}{}_reshape".format(op.name, idx))
            reshape_op.attrs["new_shape"] = reshape_input_shape
            reshape_op.inputs = [reshape_in, new_shape_tens]
            reshape_op.set_output_tensor(out_tens)
            reshape_op.set_ifm_ofm_shapes()

            op.outputs[idx] = reshape_in

    return tens
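
# Note on the mask handling in fixup_stridedslice_output above: "mask &= mask - 1" clears the
# lowest set bit and int(math.log2(prev_mask - mask)) recovers that bit's index, so each loop
# iteration handles exactly one axis. For example shrink_axis_mask = 0b101 visits axes 0 and 2,
# re-inserting a 1 into the reshape shape at each of those positions (the new_axis_mask branch
# instead drops the axis).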


def fixup_unpack_output(tens, arch, nng):
    op = tens.ops[0]
    if op.run_on_npu and op.type == Op.Unpack:
        # Unpack is also referred to as Unstack
        # Requires the rewrite_split function to be called on the op afterwards
        axis = int(op.attrs["axis"])
        op.type = Op.UnpackReshaped
        reshape_input_shape = tens.shape[:axis] + [1] + tens.shape[axis:]

        # Construct 1 shape tensor to be used by all inserted reshape ops
        new_shape_tens = create_const_tensor(op.name + "_reshape_shape", [1], DataType.int32, tens.shape)

        for idx, out_tens in enumerate(op.outputs):
            reshape_in = out_tens.clone("_reshaped")
            reshape_in.set_all_shapes(reshape_input_shape)
            reshape_in.ops = [op]

            reshape_op = Operation(Op.Reshape, "{}{}_reshape".format(op.name, idx))
            reshape_op.attrs["new_shape"] = reshape_input_shape
            reshape_op.inputs = [reshape_in, new_shape_tens]
            reshape_op.set_output_tensor(out_tens)
            reshape_op.set_ifm_ofm_shapes()
            DebugDatabase.add_optimised(op, reshape_op)

            op.outputs[idx] = reshape_in
    return tens


def add_padding_fields(op, arch, nng):
    if op.run_on_npu:
        if "padding" in op.attrs:
            if op.type.is_conv2d_op() or op.type.is_depthwise_conv2d_op():
                kernel_size = op.inputs[1].shape[:2]
                input_shape = op.inputs[0].shape
            elif op.type.is_pool_op() or op.type.npu_block_type == NpuBlockType.ReduceSum:
                kernel_size = op.attrs["ksize"][1:3]
                input_shape = op.inputs[0].shape
            else:
                raise UnsupportedFeatureError(f"Unknown operation that uses padding: {optype_to_builtintype(op.type)}")

            if op.type == Op.Conv2DBackpropInputSwitchedBias:
                upscaling_factor = op.outputs[0].shape[1] // input_shape[1]
                padding, skirt = calc_upscaled_padding_and_skirt(
                    op.attrs["padding"], kernel_size, op.attrs["strides"], input_shape, upscaling_factor
                )
            else:
                dilation_h, dilation_w = op.get_dilation_h_w()
                dilated_kernel_size = [dilation_h * (kernel_size[0] - 1) + 1, dilation_w * (kernel_size[1] - 1) + 1]
                padding, skirt = calc_padding_and_skirt(
                    op.attrs["padding"], dilated_kernel_size, op.attrs["strides"], input_shape
                )

            op.attrs["explicit_padding"] = padding
            op.attrs["skirt"] = skirt

    return op


# Check if the op can be reordered
def get_prepend_op(op):
    inp = op.inputs[0]
    # The op should be reordered between prev_op and prep_op
    prev_op = inp.ops[-1]
    prep_op = None
    while prev_op.type in memory_only_ops and len(prev_op.outputs) == 1 and len(prev_op.outputs[0].consumers()) == 1:
        prep_op = prev_op
        inp = prev_op.inputs[0]
        prev_op = inp.ops[-1]
    if prev_op is not None and len(prev_op.outputs) == 1 and len(prev_op.outputs[0].consumers()) == 1:
        return prep_op

    return None


def convert_depthwise_to_conv(op, arch, nng):
    # Depthwise is equivalent to a single conv2d if the ifm depth is 1 and
    # the ofm depth equals the depth multiplier.
    # If those conditions are true, then we can perform a simple
    # switch of the operator type (and weight order)

    if op.type == Op.DepthwiseConv2DBias and (op.attrs["depth_multiplier"] != 1):
        ifm_tensor = op.inputs[0]
        weight_tensor = op.inputs[1]
        ofm_tensor = op.outputs[0]
        if (ifm_tensor.shape[3] == 1) and (ofm_tensor.shape[3] == op.attrs["depth_multiplier"]):
            # Change op type to Conv2d
            op.type = Op.Conv2DBias
            del op.attrs["channel_multiplier"]
            del op.attrs["depth_multiplier"]

            weight_tensor.quant_values = np.transpose(weight_tensor.quant_values, (0, 1, 3, 2))
            weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))
        else:
            raise UnsupportedFeatureError(
                f"Unsupported 'DEPTHWISE_CONV_2D' with depth_multiplier = {op.attrs['depth_multiplier']},"
                f" ifm channels = {ifm_tensor.shape[3]}, ofm channels = {ofm_tensor.shape[3]}"
            )
        DebugDatabase.add_optimised(op, op)
    return op


def reorder_depthwise_weights(op, arch, nng):
    if op.type.is_depthwise_conv2d_op():
        weight_tensor = op.inputs[1]
        weight_tensor.quant_values = np.transpose(weight_tensor.quant_values, (0, 1, 3, 2))
        weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))
        weight_tensor.weight_transpose_depthwise = True

    return op


def convert_conv_to_fc(op, arch, nng):
    # Conv 1x1 can be equivalent to Fully Connected.
    # By representing certain convs as fully connected layers, Vela can better determine whether or not to use
    # caching/double buffering for the weights.
    # (Weights don't need to be reloaded for convs when IFM H and W are 1)
    if op.type == Op.Conv2DBias:
        h = op.ifm_shapes[0].height
        w = op.ifm_shapes[0].width
        kh, kw, _, _ = op.inputs[1].shape
        if h == 1 and w == 1 and kh == 1 and kw == 1:
            # Overwrite this op as a Fully Connected Op
            op.name += "_fc"
            op.type = Op.FullyConnected
            op.attrs = {
                "weights_format": 0,
            }
            # Reshape Weights to be 2D. HWIO becomes just IO (as H and W are 1, they can just be dropped)
            weight_tensor = op.inputs[1]
            weight_tensor.quant_values = weight_tensor.quant_values.squeeze(axis=(0, 1))
            weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))

            # The output from a fully connected is expected to be 2D so we need to add a reshape layer to convert it
            # back to 4D afterwards as the next layer is expecting that shape
            orig_ofm_tensor = op.outputs[0]
            # Reshape this op's output to be 2D: {(N*H*W), C} (We know N, H and W are all 1 so this becomes {1, C})
            fc_ofm_tensor = orig_ofm_tensor.clone("_fc")
            fc_ofm_tensor.set_all_shapes([1, fc_ofm_tensor.shape[-1]])
            fc_ofm_tensor.ops = [op]
            # Add a reshape after the new OFM to convert it back to the original 4D shape
            reshape_name = op.name + "_reshape"
            new_shape_tens = create_const_tensor(reshape_name + "_shape", [1], DataType.int32, orig_ofm_tensor.shape)
            reshape_op = Operation(Op.Reshape, reshape_name)
            reshape_op.attrs["new_shape"] = orig_ofm_tensor.shape
            reshape_op.inputs = [fc_ofm_tensor, new_shape_tens]
            reshape_op.set_output_tensor(orig_ofm_tensor)
            reshape_op.set_ifm_ofm_shapes()

            # Replace this op's OFM to point to the 2D tensor
            op.outputs[0] = fc_ofm_tensor
            op.set_ifm_ofm_shapes()
            # Record optimisation in debug database
            DebugDatabase.add_optimised(op, reshape_op)
            DebugDatabase.add_optimised(op, op)
    return op


def fixup_relus_with_differing_ifm_ofm_scaling(op, arch, nng):
    if op.run_on_npu and op.type.is_relu_op():
        ifm = op.inputs[0]
        ofm = op.outputs[0]
        # Relu with differing IFM and OFM scaling cannot be fused with another primary op
        # and requires its own to be inserted
        if not check_quantized_tens_scaling_equal(ifm, ofm):
            # Override this op with its own primary op (avgpool)
            relu_fused_op = create_avgpool_nop(op.name + "_avgpool")
            # And fuse the original activation function to it
            relu_fused_op.activation = create_activation_function(op.type)
            # Tidy up and assign the ifm and ofm to the new op
            ifm.consumer_list.remove(op)

            # if not 4d, reshape ifm/ofm
            if len(ifm.shape) < 4:
                ifm_shaped = create_reshape_tensor(ifm, full_shape(4, ifm.shape, 1))
                ifm = ifm_shaped
            if len(ofm.shape) < 4:
                ofm_shaped = create_reshape_tensor(ofm, full_shape(4, ofm.shape, 1), False)
                ofm = ofm_shaped

            relu_fused_op.add_input_tensor(ifm)
            relu_fused_op.set_output_tensor(ofm)
            relu_fused_op.set_ifm_ofm_shapes()
            op = relu_fused_op
    return op


# Reorder activation op if it's after the memory only operations
def fixup_act_reorder(op, arch, nng):
    if op.type.is_relu_op() or op.type in (Op.Sigmoid, Op.Tanh):
        prep_op = get_prepend_op(op)
        if prep_op is not None:
            act_op = op.clone("_reordered")
            act_op.ifm_shapes = list(op.ifm_shapes)
            act_op.ofm_shapes = list(op.ofm_shapes)

            # There is only one input tensor, overwrite it
            act_op.set_input_tensor(prep_op.inputs[0], 0)

            act_op_out = act_op.inputs[0].clone("_acted")
            act_op_out.quantization = op.outputs[0].quantization.clone()
            act_op.set_output_tensor(act_op_out)
            act_op.ifm_shapes[0] = Shape4D(prep_op.inputs[0].shape)
            act_op.ofm_shapes[0] = Shape4D(act_op_out.shape)

            # Update the consumer list
            act_op_out.consumer_list = op.outputs[0].consumer_list.copy()
            act_op_out.consumer_list.append(prep_op)

            prep_op.inputs[0] = act_op_out
            prep_op.outputs[0].quantization = act_op_out.quantization.clone()

            # Mark the op so that it will be removed as passthrough later on
            op.type = Op.Identity

            # Record optimisation in debug database
            DebugDatabase.add_optimised(op, act_op)
            DebugDatabase.add_optimised(op, op)
    return op


def fixup_elementwise_with_scalars(op, arch, nng):
    if op.type.is_binary_elementwise_op():
        ifm_tensor, ifm2_tensor, _, _ = op.get_ifm_ifm2_weights_ofm()
        if ifm2_tensor.shape != [] and ifm_tensor.shape != []:
            diff = len(ifm_tensor.shape) - len(ifm2_tensor.shape)
            if diff > 0:
                ifm2_tensor.shape = full_shape(len(ifm_tensor.shape), ifm2_tensor.shape, 1)
            elif diff < 0:
                ifm_tensor.shape = full_shape(len(ifm2_tensor.shape), ifm_tensor.shape, 1)
        elif ifm_tensor.shape == [] and ifm_tensor.quant_values is None:
            # IFM is marked as a scalar, but is a result of an operation; change it to a shape of size 1
            ifm_tensor.shape = len(ifm2_tensor.shape) * [1]
            ifm_tensor.storage_shape = ifm_tensor.shape
        elif ifm2_tensor.shape == [] and ifm2_tensor.quant_values is None:
            # IFM2 is marked as a scalar, but is a result of an operation; change it to a shape of size 1
            ifm2_tensor.shape = len(ifm_tensor.shape) * [1]
            ifm2_tensor.storage_shape = ifm2_tensor.shape
    return op
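
# Broadcasting example for fixup_elementwise_with_scalars (illustrative shapes): an Add with
# IFM shape [1, 8, 8, 4] and IFM2 shape [4] pads the shorter shape with leading ones to
# [1, 1, 1, 4], so both inputs have the same rank before lowering to the NPU.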


# Set input/output tensor equivalence to the same id for memory operations
def set_tensor_equivalence(op, arch, nng):
    if op.type in memory_only_ops:
        eid = op.outputs[0].equivalence_id
        for inp in op.inputs:
            inp.equivalence_id = eid
    return op


def set_ifm_ofm_op_shapes(op, arch, nng):
    if op.run_on_npu and op.type.needs_shapes():
        if op.ifm_shapes or op.ofm_shapes:
            # Shapes already set
            return op
        op.set_ifm_ofm_shapes()
    return op


def convert_softmax(op, arch, nng):
    if op.type == Op.Softmax and op.run_on_npu:
        softmax = SoftMax(op)
        op = softmax.get_graph()
    return op


def convert_mul_max_to_abs_or_lrelu(op, arch, nng):
    r"""Whenever there is a subgraph with this topology:

       Input    X   For X = -1 or X > 0
       |   \   /    This subgraph can be replaced with either
       |    Mul     an Abs (if X = -1) or a LeakyReLU (if X > 0)
       |   /
       Max
    """

    if op.type == Op.Maximum:
        # finds the Mul input(s) to the Max
        muls = [i for i in op.inputs if i.ops[0].type == Op.Mul]
        if len(muls) == 1:
            mul = muls[0].ops[0]
        elif len(muls) == 2:
            # In the case both inputs are Muls, find the one with the same input as the Max
            mul = [m for m in muls if len(set(op.inputs + m.ops[0].inputs)) == 1][0].ops[0]
        else:
            # No Mul inputs
            return op

        # make sure the Mul doesn't have any other consumers
        mul_ofm = mul.outputs[0]
        if len(mul_ofm.consumers()) != 1:
            return op
        # make sure the Mul doesn't have a fused activation function
        if mul.activation:
            return op
        ifm, ofm = op.get_ifm_ofm()
        if ifm is None or ofm is None:
            return op

        if ifm.dtype not in (DataType.uint8, DataType.int8) or ifm.dtype != ofm.dtype:
            return op
        if not check_quantized_tens_scaling_equal(ifm, ofm) or not check_quantized_tens_scaling_equal(ifm, mul_ofm):
            # rewrite to LeakyRelu currently only makes sense if the quantization is identical
            return op

        # finds the branched input that goes to both the Max and the Mul
        shared = set(op.inputs) & set(mul.inputs)
        if len(shared) == 1:
            shared_in = shared.pop()
            # find the constant scalar input to the Mul
            const_tens = (set(mul.inputs) - {shared_in}).pop()
            # check that it is a scalar
            if const_tens.shape != []:
                return op
            const = const_tens.ops[0]
            # check that it is a constant
            if const.type != Op.Const:
                return op
            # Remove the Mul from the shared input's consumers
            shared_in.consumer_list.remove(mul)
        else:
            return op

        val = const.outputs[0].values
        if val >= 0:
            new_op = Op.LeakyRelu
            op.attrs["alpha"] = val
            # to produce bit exact results, the alpha is not enough;
            # save additional scaling info in attr "alpha_scaling", to be used as input
            # to the LUT construction
            alpha_scalar = const_tens.quant_values - const_tens.quantization.zero_point
            mul_ifm_scale = np.double(ifm.quantization.scale_f32)
            mul_ifm2_scale = np.double(const_tens.quantization.scale_f32)
            mul_ofm_scale = np.double(mul_ofm.quantization.scale_f32)
            alpha_scale, alpha_shift = scaling.elementwise_mul_scale(mul_ifm_scale, mul_ifm2_scale, mul_ofm_scale)
            op.attrs["alpha_scaling"] = (alpha_scalar, alpha_scale, alpha_shift)
        elif val == -1:
            new_op = Op.Abs
        else:
            return op

        op.type = new_op
        op.name = op.name.replace("Maximum", new_op.name)
        op.outputs[0].name = op.outputs[0].name.replace("Maximum", new_op.name)
        op.inputs = [shared_in]

        # Record optimisation in debug database
        DebugDatabase.add_optimised(op, op)

    return op


def convert_lrelu_to_mul_max(op, arch):
    # Converts LeakyRelu to Max(alpha * IFM, identity * IFM)
    # (the opposite of convert_mul_max_to_abs_or_lrelu)
    ifm, ofm = op.get_ifm_ofm()
    if ifm is None or ofm is None:
        return op

    # Add multiplication with alpha
    mul_alpha = Operation(Op.Mul, op.name + "_mul_alpha")
    mul_alpha.add_input_tensor(ifm)
    # Create const tensor containing alpha as scalar
    alpha = op.attrs["alpha"]
    quantization = ifm.quantization.clone()
    quantization.min = 0
    quantization.max = alpha * (quantization.quant_max - quantization.quant_min)
    quantization.scale_f32 = alpha
    quantization.zero_point = 0
    alpha_tens = create_const_tensor(op.name + "_alpha_scalar", [], ifm.dtype, [1], np.int8, quantization=quantization)
    mul_alpha.add_input_tensor(alpha_tens)
    fm_alpha = ofm.clone(op.name + "_alpha")
    mul_alpha.set_output_tensor(fm_alpha)
    mul_alpha.set_ifm_ofm_shapes()
    DebugDatabase.add_optimised(op, mul_alpha)

    if check_quantized_tens_scaling_equal(ifm, ofm):
        # No identity multiplication is needed
        fm_id = ifm
    else:
        # Add multiplication with identity
        mul_identity = Operation(Op.Mul, op.name + "_mul_identity")
        mul_identity.add_input_tensor(ifm)
        # Create const tensor containing identity as scalar
        quantization = ifm.quantization.clone()
        quantization.min = 0
        quantization.max = quantization.quant_max - quantization.quant_min
        quantization.scale_f32 = 1
        quantization.zero_point = 0
        identity_tens = create_const_tensor(
            op.name + "_id_scalar", [], ifm.dtype, [1], np.uint8, quantization=quantization
        )
        mul_identity.add_input_tensor(identity_tens)
        fm_id = ofm.clone(op.name + "_id")
        mul_identity.set_output_tensor(fm_id)
        mul_identity.set_ifm_ofm_shapes()
        DebugDatabase.add_optimised(op, mul_identity)

    # Convert LeakyRelu to Max, add the results of the multiplication(s) as inputs
    op.type = Op.Maximum
    op.name = op.name.replace("LeakyRelu", "Maximum")
    op.inputs = []
    ifm.consumer_list.remove(op)
    op.add_input_tensor(fm_alpha)
    op.add_input_tensor(fm_id)

    DebugDatabase.add_optimised(op, op)
    return op
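
# Note: the Mul/Max rewrite above relies on LeakyRelu(x) == max(alpha * x, x) holding
# element-wise, which is the case for the usual 0 <= alpha <= 1; the identity multiplication
# is only materialised when the IFM and OFM quantisation differ.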


def convert_to_lut(op, lut_values, lut_name):
    # Rewrite the operation as an Add with scalar 0 + LUT activation
    ifm = op.inputs[0]
    if ifm is None:
        return op
    assert ifm.dtype.size_in_bytes() == 1
    op.type = Op.Add
    op.name = op.name + "_lut_" + lut_name
    # Mark as no-op to enable potential fusing optimizations
    op.attrs["is_nop"] = True
    # Create an input tensor containing scalar zero
    quantization = QuantizationParameters(0.0, 255.0)
    quantization.scale_f32 = ifm.quantization.scale_f32
    quantization.zero_point = 0
    tens = create_const_tensor(op.inputs[0].name + "_scalar0", [], ifm.dtype, [0], np.uint8, quantization=quantization)
    op.add_input_tensor(tens)
    op.ifm_shapes.append(Shape4D(tens.shape))

    # The LUT must be applied without any preceding rescaling (the LUT itself performs the rescale),
    # so even if the OFM has a different scale than the IFM, the generated OFM scale instructions
    # should be the same as the IFM
    op.forced_output_quantization = ifm.quantization
    lut_tensor = lut.create_lut_tensor(op.name + "_values", lut_values, DataType.int8)
    op.set_activation_lut(lut_tensor)
    return op


def convert_to_lut8(op, fn, fn_name):
    # Converts op to a no-op + int8/uint8 LUT which is generated with the given function.
    # fn is a function(real) -> real
    ifm, ofm = op.get_ifm_ofm()
    if ifm.dtype not in (DataType.uint8, DataType.int8) or ifm.dtype != ofm.dtype:
        return op
    # Generate the LUT
    ifm_scale = np.double(ifm.quantization.scale_f32)
    ofm_scale = np.double(ofm.quantization.scale_f32)
    zp_in = ifm.quantization.zero_point
    zp_out = ofm.quantization.zero_point
    values = []
    ix = range(256) if ifm.dtype == DataType.uint8 else range(-128, 128)
    quantized_min = min(ix)
    quantized_max = max(ix)
    for x in ix:
        x_real = ifm_scale * (x - zp_in)
        y_real = fn(x_real)
        lut_result = round_away_zero(zp_out + y_real / ofm_scale)
        lut_result = min(quantized_max, max(quantized_min, lut_result))
        values.append(lut_result)
    return convert_to_lut(op, values, fn_name)
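
# Worked LUT entry for convert_to_lut8 (illustrative quantisation values, not taken from any
# real network): assuming a uint8 sigmoid with ifm_scale = 0.1, zp_in = 128, ofm_scale = 1 / 256
# and zp_out = 0, the entry for x = 138 is round_away_zero(sigmoid(0.1 * (138 - 128)) * 256),
# i.e. approximately round_away_zero(187.2) = 187, then clamped to the uint8 range [0, 255].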


def convert_lrelu_to_lut(op, arch):
    ifm, ofm = op.get_ifm_ofm()
    # Generate the LUT
    alpha = op.attrs["alpha"]
    ifm_scale = np.double(ifm.quantization.scale_f32)
    ofm_scale = np.double(ofm.quantization.scale_f32)
    zp_in = ifm.quantization.zero_point
    zp_out = ofm.quantization.zero_point
    identity_scale, identity_shift = scaling.elementwise_mul_scale(ifm_scale, 1, ofm_scale)
    alpha_scalar = 1
    alpha_scale, alpha_shift = scaling.elementwise_mul_scale(ifm_scale, alpha, ofm_scale)
    if "alpha_scaling" in op.attrs:
        # The LeakyRelu was the result from convert_mul_max_to_abs_or_lrelu
        alpha_scalar, alpha_scale, alpha_shift = op.attrs["alpha_scaling"]
    values = []
    ix = range(256) if ifm.dtype == DataType.uint8 else range(-128, 128)
    quantized_min = min(ix)
    quantized_max = max(ix)
    for x in ix:
        if x < zp_in:
            lut_result = zp_out + fp_math.multiply_by_quantized_multiplier(
                alpha_scalar * (x - zp_in), alpha_scale, alpha_shift
            )
        else:
            lut_result = zp_out + fp_math.multiply_by_quantized_multiplier(x - zp_in, identity_scale, identity_shift)
        lut_result = min(quantized_max, max(quantized_min, lut_result))
        values.append(lut_result)
    return convert_to_lut(op, values, "lrelu")


def convert_lrelu(op, arch, nng):
    # Converts LeakyRelu to a LUT based solution if possible, otherwise a mul + max
    if op.type != Op.LeakyRelu:
        return op
    ifm, ofm = op.get_ifm_ofm()
    if ifm is None or ofm is None:
        return op
    if ifm.dtype in (DataType.uint8, DataType.int8) and ifm.dtype == ofm.dtype:
        # use LUT for int8/uint8
        return convert_lrelu_to_lut(op, arch)
    if check_quantized_tens_scaling_equal(ifm, ofm) and ifm.dtype == ofm.dtype == DataType.int16:
        # use LeakyRelu unmodified for int16 with equal input/output scaling
        return op
    return convert_lrelu_to_mul_max(op, arch)


def convert_tanh_sigmoid_to_lut(op, arch, nng):
    # Converts int8/uint8 Sigmoid and Tanh to a LUT based solution
    if op.type == Op.Sigmoid:
        return convert_to_lut8(op, clamp_sigmoid, "sigmoid")
    elif op.type == Op.Tanh:
        return convert_to_lut8(op, math.tanh, "tanh")
    return op


Patrik Gustavsson3010d9b2020-10-01 08:22:10 +0200998def remove_unwanted_reshapes(op, arch, nng):
Patrik Gustavssonfa4cb292020-09-10 08:19:36 +0200999 # Try to remove reshapes enclosing ElementWise operator with only one non-constant input
Louis Verhaardaee5d752020-09-30 09:01:52 +02001000 if not op.run_on_npu or not op.type.is_elementwise_op():
Patrik Gustavssonfa4cb292020-09-10 08:19:36 +02001001 return op
1002
1003 # Check if the ElementWise operator only have one non-constant input
Louis Verhaardaee5d752020-09-30 09:01:52 +02001004 non_const_tens = [x for x in op.inputs if x.ops[0].type != Op.Const]
Patrik Gustavssonfa4cb292020-09-10 08:19:36 +02001005 if len(non_const_tens) != 1:
1006 return op
1007 ifm = non_const_tens[0]
1008
1009 # Check if operation is enclosed by Reshapes that can be removed
1010 ofm = op.outputs[0]
1011 prev_op = ifm.ops[0]
1012 if (
1013 len(ifm.consumer_list) == 1
Louis Verhaardaee5d752020-09-30 09:01:52 +02001014 and prev_op.type == Op.Reshape
Patrik Gustavssonfa4cb292020-09-10 08:19:36 +02001015 and len(ofm.consumer_list) == 1
Louis Verhaardaee5d752020-09-30 09:01:52 +02001016 and ofm.consumer_list[0].type == Op.Reshape
Patrik Gustavssonfa4cb292020-09-10 08:19:36 +02001017 ):
1018 # Operation is enclosed by Reshapes; check if they can be removed
Louis Verhaardaee5d752020-09-30 09:01:52 +02001019 prev_op_ifm, prev_op_ofm = prev_op.get_ifm_ofm()
Patrik Gustavssonfa4cb292020-09-10 08:19:36 +02001020 cons_op = ofm.consumer_list[0]
1021 cons_op_ifm = ofm
1022 cons_op_ofm = cons_op.outputs[0]
1023 if len(prev_op_ifm.shape) == len(cons_op_ofm.shape):
1024 # Check if quantization is the same in the input and output for the reshape ops
Tim Hall93582962020-09-09 21:58:15 +01001025 if check_quantized_tens_scaling_equal(prev_op_ifm, prev_op_ofm) and check_quantized_tens_scaling_equal(
1026 cons_op_ifm, cons_op_ofm
1027 ):
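# Bypass both Reshapes: the ElementWise op now reads the tensor feeding the
# first Reshape and writes the second Reshape's output.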
Patrik Gustavsson7ad862a2020-09-29 14:09:43 +02001028 op.set_input_tensor(prev_op_ifm, 0)
1029 op.set_output_tensor(cons_op_ofm)
Patrik Gustavssonfa4cb292020-09-10 08:19:36 +02001030 return op
1031
1032
Patrik Gustavsson3010d9b2020-10-01 08:22:10 +02001033def fuse_activation_function_with_prev(op, arch, nng):
Louis Verhaardb9fc33c2020-08-13 11:47:36 +02001034 # If op is a no-op, attempt to move its activation function to the preceding op
Louis Verhaardaee5d752020-09-30 09:01:52 +02001035 if not op.attrs.get("is_nop", False) or op.activation is None:
Louis Verhaardb9fc33c2020-08-13 11:47:36 +02001036 return op
Louis Verhaardaee5d752020-09-30 09:01:52 +02001037 ifm, ofm = op.get_ifm_ofm()
Tim Hall93582962020-09-09 21:58:15 +01001038 if ifm is None or ofm is None:
1039 return op
Louis Verhaardb9fc33c2020-08-13 11:47:36 +02001040 # find the op that produces this op's input
1041 prev_op = ifm.ops[0]
1042 # Note: the below checks on prev_op require that a first optimize pass on the full graph has been performed
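# Fusing requires prev_op to run on the NPU as a real NPU block, to be the sole
# producer of ifm, to have no other consumers of its output, and to have no
# activation of its own.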
1043 fuse = (
1044 prev_op.run_on_npu
Louis Verhaardaee5d752020-09-30 09:01:52 +02001045 and prev_op.type.npu_block_type != NpuBlockType.Default
Louis Verhaardb9fc33c2020-08-13 11:47:36 +02001046 and len(ifm.ops) == 1
1047 and len(prev_op.outputs[0].consumers()) == 1
Louis Verhaardaee5d752020-09-30 09:01:52 +02001048 and prev_op.activation is None
Louis Verhaardb9fc33c2020-08-13 11:47:36 +02001049 )
1050 if op.activation_lut is not None and arch.shram_reserved_unused_banks == 0:
1051 # TODO: if SHRAM LUT space is shared with SHRAM ACC (32, 64 MAC),
1052 # LUT currently only works correctly for elementwise ops
1053 fuse = False
Louis Verhaardb9fc33c2020-08-13 11:47:36 +02001054 if not fuse:
1055 return op
1056 # Move the fused activation function + corresponding info to prev_op
Louis Verhaardaee5d752020-09-30 09:01:52 +02001057 prev_op.activation = op.activation
1058 prev_op.forced_output_quantization = op.forced_output_quantization
Louis Verhaardb9fc33c2020-08-13 11:47:36 +02001059 if op.activation_lut is not None:
1060 prev_op.set_activation_lut(op.activation_lut)
1061 # Bypass op
Louis Verhaard98a34992020-09-01 10:39:04 +02001062 prev_op.set_output_tensor(ofm)
Tim Halle6ccd872020-11-09 16:46:37 +00001063 DebugDatabase.add_optimised(op, prev_op)
Louis Verhaardb9fc33c2020-08-13 11:47:36 +02001064 return op
1065
1066
Patrik Gustavsson3010d9b2020-10-01 08:22:10 +02001067def add_attrs_to_resizebilinear(op, arch, nng):
Louis Verhaardaee5d752020-09-30 09:01:52 +02001068 if op.type == Op.ResizeBilinear and op.run_on_npu:
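# A 2x ResizeBilinear is implemented as nearest-neighbour upscaling of the IFM
# followed by a 2x2 average pool with unit stride; the padding mode depends on
# whether align_corners was requested.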
Dwight Lidman42fed942020-05-29 09:37:03 +02001069 input_tensor = op.inputs[0]
1070 upscaled_shape = [input_tensor.shape[1] * 2, input_tensor.shape[2] * 2]
1071 out_shape = op.outputs[0].shape[1:3]
1072 if not op.attrs["align_corners"] and out_shape == upscaled_shape:
1073 # this means the output is supposed to be a 2x upscale,
1074 # so we need to do SAME padding
Michael McGeagh16895482020-12-14 15:51:20 +00001075 op.attrs["padding"] = Padding.SAME
Dwight Lidman42fed942020-05-29 09:37:03 +02001076 elif op.attrs["align_corners"] and out_shape == [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
1077 # here we can just run the avg pool without padding and
1078 # produce a (M * 2 - 1, N * 2 - 1) sized output
Michael McGeagh16895482020-12-14 15:51:20 +00001079 op.attrs["padding"] = Padding.VALID
Dwight Lidman42fed942020-05-29 09:37:03 +02001080 else:
Charles Xu9a03fdf2020-07-02 15:12:40 +02001081 return op
Dwight Lidman42fed942020-05-29 09:37:03 +02001082 input_tensor.resampling_mode = resampling_mode.NEAREST
Tim Hallc30f4952020-06-15 20:47:35 +01001083 op.attrs.update({"strides": (1, 1, 1, 1), "ksize": (1, 2, 2, 1)})
Dwight Lidman42fed942020-05-29 09:37:03 +02001084 return op
1085
1086
Patrik Gustavsson3010d9b2020-10-01 08:22:10 +02001087def fixup_bias_tensors(op, arch, nng):
Louis Verhaardaee5d752020-09-30 09:01:52 +02001088 if op.type.needs_bias() and op.bias is None:
Jacob Bohlina41cd4d2020-08-26 18:21:28 +02001089 # Op has no bias; add a bias tensor filled with zeros
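# Later passes assume the bias input is present, so create an all-zero int32
# bias with one value per element of the weight tensor's last dimension.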
1090 nr_biases = op.inputs[1].shape[-1]
1091 bias_values = [0] * nr_biases
1092 bias_tensor = create_const_tensor(op.name + "_bias", [nr_biases], DataType.int32, bias_values)
1093 bias_tensor.quant_values = bias_tensor.values
1094 op.set_input_tensor(bias_tensor, -1)
Jacob Bohlin67e0d8f2020-08-20 10:53:02 +02001095
1096 return op
1097
1098
Patrik Gustavsson3010d9b2020-10-01 08:22:10 +02001099def supported_operator_check(op, arch, nng):
Tim Hall79d07d22020-04-27 18:20:16 +01001100 op.run_on_npu = arch.supported_operators.is_operator_supported(op)
1101 return op
1102
1103
Tim Halle6ccd872020-11-09 16:46:37 +00001104def _record_optimised(op, arch):
1105 if op.type != Op.Const:
1106 DebugDatabase.add_optimised(op, op)
1107
1108
Tim Hall79d07d22020-04-27 18:20:16 +01001109def optimise_graph_a(nng, arch, verbose_graph=False):
1110 if verbose_graph:
1111 nng.print_graph()
1112
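# First pass: decide which ops can run on the NPU and record ifm/ofm shapes
# before any of the rewrites below run.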
Patrik Gustavsson2349d422020-12-01 16:02:29 +01001113 pre_process_list = [
1114 supported_operator_check,
1115 set_ifm_ofm_op_shapes,
1116 # TODO: memory-only Op removal
1117 ]
1118
1119 for idx, sg in enumerate(nng.subgraphs):
1120 # rewrite graph pass
1121 nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order(
1122 nng, sg, arch, [], pre_process_list, rewrite_unsupported=False,
1123 )
1124
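# Note: the rewrites are applied in order to each op, so rewrites whose output
# other rewrites act on (e.g. convert_mul_max_to_abs_or_lrelu producing
# LeakyRelu for convert_lrelu) must come first.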
Tim Hall79d07d22020-04-27 18:20:16 +01001125 op_rewrite_list = [
Tim Hall4e127762020-05-15 16:05:49 +01001126 set_tensor_equivalence,
Tim Hall79d07d22020-04-27 18:20:16 +01001127 convert_depthwise_to_conv,
Michael McGeagh8d939c02020-07-29 13:11:43 +01001128 convert_conv_to_fc,
Fredrik Svedberga0c36242020-06-03 15:43:31 +02001129 convert_softmax,
Tim Hall79d07d22020-04-27 18:20:16 +01001130 fixup_fully_connected_input,
Diqing Zhong94457b12020-12-09 15:22:40 +01001131 convert_batched_fc_shape,
Tim Hall79d07d22020-04-27 18:20:16 +01001132 fixup_pack_input,
Fredrik Svedberg0f98b362020-09-29 10:00:39 +02001133 unfuse_activation_function,
Tim Hall79d07d22020-04-27 18:20:16 +01001134 fixup_conv2d_backprop,
Michael McGeagh8dbf8cf2020-09-08 11:09:48 +01001135 fixup_relus_with_differing_ifm_ofm_scaling,
Tim Hall79d07d22020-04-27 18:20:16 +01001136 fixup_act_reorder,
Charles Xu78792222020-05-13 10:15:26 +02001137 fixup_elementwise_with_scalars,
Jacob Bohline843d332020-06-23 12:12:56 +02001138 reorder_depthwise_weights,
Charles Xu9a03fdf2020-07-02 15:12:40 +02001139 fixup_resizebilinear,
Jacob Bohlina41cd4d2020-08-26 18:21:28 +02001140 fixup_bias_tensors,
Dwight Lidmanc3862c22020-09-14 15:22:33 +02001141 convert_nop_split_to_identity,
Louis Verhaardb9fc33c2020-08-13 11:47:36 +02001142 convert_mul_max_to_abs_or_lrelu,
Patrik Gustavssonfa4cb292020-09-10 08:19:36 +02001143 remove_unwanted_reshapes,
Louis Verhaardb9fc33c2020-08-13 11:47:36 +02001144 convert_lrelu,
Louis Verhaardf03bad32020-09-25 08:30:44 +02001145 convert_tanh_sigmoid_to_lut,
Tim Hall79d07d22020-04-27 18:20:16 +01001146 ]
1147
1148 for idx, sg in enumerate(nng.subgraphs):
1149 # rewrite graph pass
1150 nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order(
Dwight Lidman73320a42020-11-05 10:34:41 +01001151 nng, sg, arch, [], op_rewrite_list, rewrite_unsupported=False,
Tim Hall79d07d22020-04-27 18:20:16 +01001152 )
1153
1154 for idx, sg in enumerate(nng.subgraphs):
Louis Verhaardb9fc33c2020-08-13 11:47:36 +02001155 # remove passthrough tensors and attempt further optimizations
1156 nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order(
Patrik Gustavsson2349d422020-12-01 16:02:29 +01001157 nng, sg, arch, [remove_passthrough_tensor], [fuse_activation_function_with_prev, add_padding_fields],
Louis Verhaardb9fc33c2020-08-13 11:47:36 +02001158 )
Tim Hall79d07d22020-04-27 18:20:16 +01001159
Tim Halle6ccd872020-11-09 16:46:37 +00001160 # Post-optimisation operator debug tracing
1161 for sg in nng.subgraphs:
1162 rewrite_graph.visit_graph_post_order(sg.output_tensors, arch, [], [_record_optimised])
1163
Tim Hall79d07d22020-04-27 18:20:16 +01001164 if verbose_graph:
1165 nng.print_graph()
1166 return nng
1167
Diego Russoea6111a2020-04-14 18:41:58 +01001168
Tim Hall79d07d22020-04-27 18:20:16 +01001169def optimise_graph_b(nng, arch, verbose_graph=False):
1170 if verbose_graph:
1171 nng.print_graph()
1172
1173 for idx, sg in enumerate(nng.subgraphs):
1174 # combined rewrite graph pass
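# Rewrites the outputs of Unpack/StridedSlice and decomposes Concat and Split
# into per-tensor pieces.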
Dwight Lidmanc6ac1942020-10-02 14:55:45 +02001175 nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order(
patrik.gustavssoneeb85152020-12-21 17:10:40 +00001176 nng, sg, arch, [fixup_unpack_output, fixup_stridedslice_output, rewrite_concat, rewrite_split], [],
Dwight Lidmanc6ac1942020-10-02 14:55:45 +02001177 )
Tim Hall79d07d22020-04-27 18:20:16 +01001178
1179 if verbose_graph:
1180 nng.print_graph()
1181 return nng