# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Early optimisation of the network graph, using the rewrite_graph module to do the traversal of the graph. These are
# split into two parts, optimise_graph_a and optimise_graph_b.
import math
import uuid
from typing import Tuple

import numpy as np

from . import fp_math
from . import lut
from . import rewrite_graph
from . import scaling
from .data_type import DataType
from .debug_database import DebugDatabase
from .errors import UnsupportedFeatureError
from .errors import VelaError
from .ethos_u55_regs.ethos_u55_regs import resampling_mode
from .numeric_util import clamp_sigmoid
from .numeric_util import full_shape
from .numeric_util import round_away_zero
from .operation import create_activation_function
from .operation import NpuBlockType
from .operation import Op
from .operation import Operation
from .operation import Padding
from .operation_util import create_avgpool_nop
from .shape4d import Shape4D
from .softmax import SoftMax
from .tensor import check_quantized_tens_scaling_equal
from .tensor import create_const_tensor
from .tensor import QuantizationParameters
from .tensor import Tensor
from .tflite_mapping import optype_to_builtintype

passthrough_nodes = (Op.Identity,)

memory_only_ops = (Op.Reshape,)


def remove_passthrough_tensor(tens, arch, nng):
    if len(tens.ops) == 1 and tens.ops[0].type in passthrough_nodes:
        assert len(tens.ops[0].inputs) == 1
        tens = tens.ops[0].inputs[0]
    return tens


def rewrite_concat_ops(op, arch):
    if not op.run_on_npu or not op.type.is_concat_op():
        return op

    axis_4D = 0
    ofm = op.ofm
    ofm.ops = []
    offset = 0

    unfuse_activation_function(op)

    if op.type == Op.Pack:
        # Pack is also referred to as Stack
        axis = int(op.attrs["axis"])
        desired_shape = op.inputs[0].shape[:axis] + [1] + op.inputs[0].shape[axis:]

        if axis >= 0:
            axis_4D = axis + (4 - len(desired_shape))
        else:
            axis_4D = axis

        for idx, inp in enumerate(op.inputs):
            op.ifm_shapes[idx] = Shape4D(desired_shape)
            if Shape4D(inp.shape) != op.ifm_shapes[idx]:
                inp.avoid_NHCWB16 = True
        op.type = Op.PackReshaped

    inputs, axis = op.get_concat_inputs_axis()

    for idx, inp in enumerate(inputs):
        if op.type != Op.PackReshaped:
            op.ifm_shapes[idx] = Shape4D(inp.shape)
            if axis >= 0:
                axis_4D = axis + (4 - len(inp.shape))
            else:
                axis_4D = axis
        avgpool_op = create_avgpool_nop(op.name + str(idx) + "_avgpool")
        avgpool_op.inputs = [inp]
        avgpool_op.outputs = [ofm]
        avgpool_op.attrs["concat_axis"] = axis_4D
        avgpool_op.attrs["concat_start"] = offset
        offset += op.ifm_shapes[idx][axis_4D]

        avgpool_op.attrs["concat_end"] = offset
        avgpool_op.run_on_npu = True
        ofm.ops.append(avgpool_op)
        DebugDatabase.add_optimised(op, avgpool_op)
        avgpool_op.ifm_shapes.append(op.ifm_shapes[idx])
        avgpool_op.ofm_shapes.append(op.ofm_shapes[0])
        avgpool_op.memory_function = Op.ConcatSliceWrite
    assert ofm.shape[axis] == offset

    # If axis corresponds to the C-dimension, NHCWB16 can only be used in the output if all the concat_start's are a
    # multiple of 16, since only then will the ofm address offset be 16 byte aligned for all operations. For other
    # values of axis the address offsets are always 16 byte aligned, as they are all based on c = 0 and such
    # addresses are always 16 byte aligned due to the NHCWB16 format.
    if axis == -1 or axis == (len(ofm.shape) - 1):
        for op in ofm.ops:
            if op.attrs["concat_start"] % 16 != 0:
                ofm.avoid_NHCWB16 = True
                break
    return op


def rewrite_split_ops(tens, arch, nng):

    if len(tens.ops) == 1 and tens.ops[0].type.is_split_op() and tens.ops[0].type != Op.Unpack:
        split_op = tens.ops[0]

        # Not supported, so leave it and run on CPU
        if not split_op.run_on_npu:
            return tens

        inp, outputs, axis, offset_start, offset_end = split_op.get_split_inputs_axis()

        tens.ops = []
        new_op = Operation(Op.SplitSliceRead, split_op.name)
        new_op.inputs = [inp]
        ofm_shape_idx = 0

        # For Split the offset cannot be extracted from the tensor, so it has to
        # be calculated from the index of the output tensor
        if axis is not None:
            # Get the start and end of the split
            offset_start = [0] * 4
            axis_4D_list = split_op.attrs.get("split_axis_4D", None)  # Present for UnpackReshaped and some StridedSlice
            for idx, out in enumerate(outputs):
                if axis_4D_list is not None:
                    axis_4D = axis_4D_list[idx]
                else:
                    split_op.ofm_shapes[idx] = Shape4D(out.shape)
                    if axis >= 0:
                        axis_4D = axis + (4 - len(out.shape))
                    else:
                        axis_4D = axis

                if out == tens:
                    ofm_shape_idx = idx
                    break

                offset_start[axis_4D] += split_op.ofm_shapes[idx][axis_4D]

            # If the start offset is not a multiple of 16 in the C-dimension, NHCWB16 needs to be avoided in the input
            if (offset_start[-1] % 16) != 0:
                inp.avoid_NHCWB16 = True

        new_op.read_offsets[0] = Shape4D.from_list(offset_start, 0)
        new_op.run_on_npu = True
        new_op.set_output_tensor(tens)
        new_op.ifm_shapes.append(Shape4D(inp.shape))
        new_op.ofm_shapes.append(split_op.ofm_shapes[ofm_shape_idx])
        DebugDatabase.add_optimised(split_op, new_op)

    return tens


def remove_SplitSliceRead(op, arch):

    if op.type == Op.SplitSliceRead:
        # Check if it is possible to put the SplitSliceRead on the tensor consumer, or if an avgpool needs to be inserted
        if (
            len(op.ofm.consumer_list) == 1
            and op.ofm.consumer_list[0] is not None
            and op.ofm.consumer_list[0].run_on_npu
            and op.ofm.consumer_list[0].type != Op.Reshape
            and op.ofm_shapes[0] == Shape4D.from_list(op.ofm.shape)
        ):
            # SplitSliceRead can be performed by the tensor consumer
            cons_op = op.ofm.consumer_list[0]
            if cons_op.ifm == op.ofm:
                cons_op.read_offsets[0] = op.read_offsets[0]
                cons_op.set_input_tensor(op.ifm, cons_op.type.info.indices.ifms[0])
                cons_op.ifm_shapes[0] = op.ifm_shapes[0]
            elif cons_op.type.is_binary_elementwise_op() and cons_op.ifm2 == op.ofm:
                cons_op.read_offsets[1] = op.read_offsets[0]
                cons_op.set_input_tensor(op.ifm, cons_op.type.info.indices.ifms[1])
                cons_op.ifm_shapes[1] = op.ifm_shapes[0]

            op.ofm.consumer_list.remove(cons_op)
            op.ofm.ops = []
            op.ifm.consumer_list.remove(op)
        else:
            avgpool_op = create_avgpool_nop(op.name + "_avgpool")
            avgpool_op.add_input_tensor(op.ifm)
            avgpool_op.outputs = [op.ofm]
            op.ofm.ops.remove(op)
            op.ofm.ops.append(avgpool_op)
            avgpool_op.ifm_shapes.append(op.ifm_shapes[0])
            avgpool_op.ofm_shapes.append(op.ofm_shapes[0])
            avgpool_op.read_offsets[0] = op.read_offsets[0]

            op.ifm.consumer_list.remove(op)
            DebugDatabase.add_optimised(op, avgpool_op)


def insert_copy_op_after_tens(tens):
    tens_cons_list_copy = tens.consumer_list.copy()

    # Create an avg_pool nop op with ifm as input
    copy_tens = tens.clone()
    copy_op = create_avgpool_nop(tens.name + "_avgpool")
    copy_op.add_input_tensor(tens)
    copy_op.set_output_tensor(copy_tens)
    copy_op.set_ifm_ofm_shapes()
    copy_op.run_on_npu = True

    # Set copy_ifm consumers
    for tens_cons in tens_cons_list_copy:
        if tens_cons is not None:
            for ifm_idx, cons_inp in enumerate(tens_cons.inputs):
                if cons_inp == tens:
                    tens_cons.set_input_tensor(copy_tens, ifm_idx)

    DebugDatabase.add_optimised(tens.ops[0], copy_op)


def fix_sg_input_output(op, arch, nng):
    if not op.run_on_npu or op.type != Op.Reshape:
        return op

    # For the Reshape operators we want to remove, the tensors are removed as well.
    # But in order to do this, they cannot be outputs of the subgraph; this needs
    # to be fixed prior to the removal. The solution is to add an avgpool NOP to
    # maintain the original tensor.

    # Check if operator ifm/ofm are sg ifm/ofm
    ifm_is_sg_ifm = op.ifm.ops[0].type in (Op.Placeholder, Op.SubgraphInput, Op.Const)
    ifm_is_sg_ofm = any(ifm_cons is None for ifm_cons in op.ifm.consumer_list)
    ofm_is_sg_ofm = any(ofm_cons is None for ofm_cons in op.ofm.consumer_list)

    if op.type == Op.Reshape and (ifm_is_sg_ofm or ifm_is_sg_ifm) and ofm_is_sg_ofm:
        # Both ifm and ofm are sg outputs; only the ifm needs a copy, in order to remove the Reshape
        insert_copy_op_after_tens(op.ifm)

    return op


def needed_total_padding(input_size, stride, filter_size):
    out_size = (input_size + stride - 1) // stride
    needed_input = (out_size - 1) * stride + filter_size
    total_padding = max(0, needed_input - input_size)
    return total_padding
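
# Worked example with arbitrary values (illustrative only):
#   needed_total_padding(input_size=9, stride=2, filter_size=3)
#   out_size = (9 + 2 - 1) // 2 = 5
#   needed_input = (5 - 1) * 2 + 3 = 11
#   total_padding = max(0, 11 - 9) = 2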


def calc_explicit_padding(input_size, stride, filter_size, pad_before, pad_after) -> Tuple[int, int]:
    """
    Based on explicit padding provided in a PAD operation, returns the corresponding hardware padding
    that provides equivalent results.
    """
    total_padding = needed_total_padding(input_size, stride, filter_size)
    # The top/left padding can be taken as is from the PAD
    output_pad_before = pad_before
    # The bottom/right padding might need downward adjustment depending on stride/input size
    output_pad_after = pad_after
    while output_pad_after > 0 and output_pad_after % stride != (total_padding - pad_before) % stride:
        output_pad_after -= 1
    return output_pad_before, output_pad_after
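
# Worked example with arbitrary values (illustrative only):
#   calc_explicit_padding(input_size=5, stride=2, filter_size=3, pad_before=1, pad_after=2)
#   total_padding = needed_total_padding(5, 2, 3) = 2, so (total_padding - pad_before) % stride = 1
#   pad_after = 2 gives 2 % 2 = 0 != 1, so it is stepped down to 1, where 1 % 2 = 1 matches
#   -> returns (1, 1): the hardware pads one row/column less than the PAD op specified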


def calc_padding_and_skirt(padding_type, kernel, input_shape, explicit_padding):
    k_w, k_h = kernel.dilated_wh()
    s_x, s_y = kernel.stride
    ypad = needed_total_padding(int(input_shape.height), int(s_y), int(k_h))
    xpad = needed_total_padding(int(input_shape.width), int(s_x), int(k_w))
    if padding_type == Padding.SAME:
        left_pad = (xpad + 0) // 2
        right_pad = (xpad + 1) // 2
        top_pad = (ypad + 0) // 2
        bottom_pad = (ypad + 1) // 2
    elif padding_type == Padding.VALID:
        left_pad = 0
        right_pad = 0
        top_pad = 0
        bottom_pad = 0
    elif padding_type == Padding.EXPLICIT:
        # Padding is specified in a PAD operator which has been bypassed.
        top, left, bottom, right = explicit_padding
        top_pad, bottom_pad = calc_explicit_padding(int(input_shape.height), int(s_y), int(k_h), int(top), int(bottom))
        left_pad, right_pad = calc_explicit_padding(int(input_shape.width), int(s_x), int(k_w), int(left), int(right))
    else:
        raise UnsupportedFeatureError(f"Unknown padding {padding_type}")
    padding = (top_pad, left_pad, bottom_pad, right_pad)
    skirt = (top_pad, left_pad, ypad - top_pad, xpad - left_pad)
    return padding, skirt
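
# Worked example with arbitrary values (illustrative only): a 3x3 kernel with stride 1
# and Padding.SAME on a 224x224 input gives xpad = ypad = 2, so
#   padding = (top, left, bottom, right) = (1, 1, 1, 1)
#   skirt = (1, 1, 2 - 1, 2 - 1) = (1, 1, 1, 1)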


def calc_upscaled_padding_and_skirt(padding_type, kernel_size, stride, input_shape, upscaling_factor):
    kernel_height, kernel_width = kernel_size[0], kernel_size[1]
    if padding_type == Padding.SAME:
        ypad = needed_total_padding(int(input_shape.height) * upscaling_factor, int(stride[1]), int(kernel_height))
        xpad = needed_total_padding(int(input_shape.width) * upscaling_factor, int(stride[2]), int(kernel_width))
        right_pad = max(((xpad + 1) // upscaling_factor) - 1, 0)
        bottom_pad = max(((ypad + 1) // upscaling_factor) - 1, 0)
        left_pad = max(kernel_width - 1 - right_pad, 0)
        top_pad = max(kernel_height - 1 - bottom_pad, 0)
    elif padding_type == Padding.VALID:
        right_pad = max(kernel_width - 2, 0)
        bottom_pad = max(kernel_height - 2, 0)
        left_pad = kernel_width - 1
        top_pad = kernel_height - 1
    else:
        raise UnsupportedFeatureError(f"Unknown padding {padding_type}")
    padding = (top_pad, left_pad, bottom_pad, right_pad)
    skirt = padding
    return padding, skirt
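
# Worked example with arbitrary values (illustrative only): a 2x2 kernel with
# Padding.VALID gives
#   right_pad = bottom_pad = max(2 - 2, 0) = 0, left_pad = top_pad = 2 - 1 = 1
#   padding = skirt = (1, 1, 0, 0)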


def fixup_conv2d_backprop(op, arch, nng):
    if op.type == Op.Conv2DBackpropInput:
        # flip the inputs
        op.inputs[0], op.inputs[2] = op.inputs[2], op.inputs[0]
        op.type = Op.Conv2DBackpropInputSwitchedBias
        op.ifm.resampling_mode = resampling_mode.TRANSPOSE

        # Update strides
        op.attrs.update({"stride_w": 1, "stride_h": 1, "strides": (1, 1, 1, 1)})

    return op


# Convert the op to an elementwise add
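# (A bilinear resize whose IFM is 1x1 just broadcasts that single value across the
# OFM, so it can be realised as an elementwise Add of the IFM with a zero-filled
# tensor of the output shape, as the code below does.)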
def convert_resizebilinear_1x1_to_add(op):
    op.type = Op.Add
    op.name = op.name + "_add"
    op.attrs["resizebilinear"] = True
    # Create an input tensor filled with zeros
    shape = op.ofm_shapes[0].as_list()
    tens = Tensor(shape, op.inputs[0].dtype, op.inputs[1].name + "_add")
    tens.values = np.zeros(shape)
    tens.quant_values = np.zeros(shape, np.uint8)
    tens.quantization = QuantizationParameters(0.0, 255.0)
    tens.quantization.scale_f32 = 1.0
    tens.quantization.zero_point = 0
    tens.consumer_list = [op]
    tens_op = op.inputs[1].ops[0]
    tens_op.set_output_tensor(tens)
    # Set the add inputs
    op.inputs[1] = op.inputs[0]
    op.inputs[0] = tens
    op.set_ifm_ofm_shapes()

    return op


# Convert ResizeBilinear to a number of 2x2 pool ops
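# In effect, each generated pool op doubles the height/width of its input (or produces
# 2x - 1 when align_corners is set, per the shape_modifier below), so larger upscale
# factors become a chain of pool ops. Illustrative example: a 3x3 -> 9x9 resize with
# align_corners takes two passes, 3 -> 2*3 - 1 = 5 -> 2*5 - 1 = 9.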
def convert_resizebilinear_to_2x2_pool(op):
    count = 0
    pre_op = op
    outputs = op.outputs

    op.attrs.update({"strides": (1, 1, 1, 1), "ksize": (1, 2, 2, 1)})
    if op.attrs["align_corners"]:
        shape_modifier = 1
        op.attrs["padding"] = Padding.VALID
    else:
        shape_modifier = 0
        op.attrs["padding"] = Padding.SAME
    op.inputs[0].resampling_mode = resampling_mode.NEAREST

    upscaled_shape = np.array(op.ifm_shapes[0].get_hw_as_list())
    out_shape = np.array(op.ofm_shapes[0].get_hw_as_list())
    if (upscaled_shape == upscaled_shape * 2 - shape_modifier).all():
        return op

    while (upscaled_shape < out_shape).all():
        if count == 0:
            scaled_op = pre_op
        else:
            scaled_op = op.clone("_{}".format(count))
            scaled_op.inputs[0] = pre_op.outputs[0]

        upscaled_shape = upscaled_shape * 2 - shape_modifier

        if (upscaled_shape == out_shape).all():
            scaled_op.outputs = outputs
            scaled_op.outputs[0].ops = [scaled_op]
        else:
            shape = op.ofm_shapes[0].as_list()
            shape[1:3] = upscaled_shape
            out_tens = Tensor(shape, DataType.int16, "{}_{}".format(op.outputs[0].name, count))
            out_tens.quantization = op.outputs[0].quantization.clone()
            out_tens.quantization.quant_min = np.iinfo(np.int16).min
            out_tens.quantization.quant_max = np.iinfo(np.int16).max
            scaled_op.set_output_tensor(out_tens)
            pre_op = scaled_op
            count += 1

        # Setup the scale value
        if scaled_op.inputs[0].dtype.bits == 8 and scaled_op.outputs[0].dtype.bits == 16:
            scaled_op.rescale = 128
        elif scaled_op.inputs[0].dtype.bits == 16 and scaled_op.outputs[0].dtype.bits == 8:
            scaled_op.rescale = 1 / 128
        else:
            scaled_op.rescale = None
        scaled_op.set_ifm_ofm_shapes()

    return op


def fixup_resizebilinear(op, arch, nng):
    if op.type == Op.ResizeBilinear and op.run_on_npu:
        if op.ifm_shapes[0] == op.ofm_shapes[0]:
            # Bypass nop resizebilinear
            op.inputs = op.inputs[:1]
            op.type = Op.Identity
        elif op.ifm_shapes[0].height == 1 and op.ifm_shapes[0].width == 1:
            convert_resizebilinear_1x1_to_add(op)
        else:
            convert_resizebilinear_to_2x2_pool(op)

    return op


def convert_nop_split_to_identity(op, arch, nng):
    if op.type == Op.Split and op.attrs.get("num_splits") == 1:
        # The list comprehension should return a list with a single tensor;
        # if it doesn't, remove_passthrough_tensor will fail appropriately
        op.inputs = [i for i in op.inputs if i.shape == op.outputs[0].shape]
        op.type = Op.Identity
    return op


def rewrite_fully_connected_input(op, arch, nng):
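    # The FullyConnected IFM is always viewed as (batch, 1, 1, n_in_elems), with
    # n_in_elems taken from the weight tensor. Illustrative example (arbitrary values):
    # an IFM of shape [2, 200] with weights of shape [200, k] has 400 elements, so
    # batch_size = 400 // 200 = 2 and ifm_shapes[0] becomes Shape4D([2, 1, 1, 200]).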
    if op.type == Op.FullyConnected:
        n_in_elems = op.weights.shape[-2]
        elms = op.ifm.elements()
        batch_size = elms // n_in_elems
        assert batch_size * n_in_elems == elms

        op.ifm_shapes[0] = Shape4D([batch_size, 1, 1, n_in_elems])
        if Shape4D(op.ifm.shape) != op.ifm_shapes[0]:
            op.ifm.avoid_NHCWB16 = True
    return op


def convert_batched_fc_shape(op, arch, nng):
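    # A batched FullyConnected (batch > 1) is recast as a 2D feature map by splitting
    # the batch into an H x W grid via batching_split. Illustrative example (arbitrary
    # depth): batch 8 with depth 64 becomes Shape4D([1, 2, 4, 64]); a batch not in the
    # table, e.g. 5, falls back to (1, n), i.e. Shape4D([1, 1, 5, 64]).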
    if op.type == Op.FullyConnected:
        # Check if the first dimension indicates batching
        if op.ifm_shapes[0].batch > 1:
            batching_split = {4: (2, 2), 8: (2, 4), 16: (4, 4)}
            n = op.ifm_shapes[0].batch
            h, w = batching_split.get(n, (1, n))
            op.ifm_shapes[0] = Shape4D([1, h, w, op.ifm_shapes[0].depth])

            op.ifm.avoid_NHCWB16 = True

            # Reshape Weights to be 4D. IO becomes HWIO
            weight_tensor = op.inputs[1]
            weight_tensor.quant_values = np.expand_dims(np.expand_dims(weight_tensor.quant_values, axis=0), axis=0)
            weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))

            n = op.ofm_shapes[0].batch
            h, w = batching_split.get(n, (1, n))
            op.ofm_shapes[0] = Shape4D([1, h, w, op.ofm_shapes[0].depth])
            op.ofm.avoid_NHCWB16 = True
    return op


def unfuse_activation_function(op):
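    # Splits a fused activation function out of a ConcatTFLite op into a standalone op:
    # the concat is redirected to write a cloned "_act_intermediate" tensor, and the new
    # activation op reads that tensor and produces the original output. This keeps the
    # activation applied once to the whole concatenated result when the concat itself is
    # later rewritten into per-input ConcatSliceWrite ops (see rewrite_concat_ops).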
    if op.type == Op.ConcatTFLite and op.run_on_npu and op.activation is not None:
        act_op = Operation(op.activation.op_type, op.name + op.activation.op_type.name)
        op.activation = None
        out_tens = op.outputs[0]
        intermediate_tens = out_tens.clone("_act_intermediate")
        act_op.set_output_tensor(out_tens)
        act_op.add_input_tensor(intermediate_tens)
        op.set_output_tensor(intermediate_tens)
        act_op.set_ifm_ofm_shapes()


def rewrite_stridedslice_output(op, arch, nng):
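    # Folds the shrink_axis_mask/new_axis_mask rank changes of StridedSlice into the 4D
    # output shapes. The masks are walked one set bit at a time: "mask &= mask - 1"
    # clears the lowest set bit, and log2 of the difference yields that bit's index,
    # i.e. the axis to adjust. Illustrative example: shrink_axis_mask = 0b101 means axes
    # 0 and 2 were squeezed out, so size-1 dimensions are re-inserted at those axes.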
    if not op.run_on_npu or op.type != Op.StridedSlice:
        return op

    new_axis_mask = op.attrs["new_axis_mask"]
    shrink_axis_mask = op.attrs["shrink_axis_mask"]

    if shrink_axis_mask == 0 and new_axis_mask == 0:
        return op

    axis_4D = [0] * len(op.outputs)
    for idx, out_tens in enumerate(op.outputs):
        output_shape = list(out_tens.shape)

        if shrink_axis_mask != 0:
            n = 0
            axis = 0
            while shrink_axis_mask:
                prev_mask = shrink_axis_mask
                n += 1
                shrink_axis_mask &= shrink_axis_mask - 1  # clear the lowest set bit
                axis = int(math.log2(prev_mask - shrink_axis_mask))  # index of the cleared bit
                output_shape = output_shape[:axis] + [1] + output_shape[axis:]

            assert len(out_tens.shape) == (len(op.inputs[0].shape) - n)
            op.attrs["shrink_axis_mask"] = 0
            if axis >= 0:
                axis_4D[idx] = axis + (4 - len(output_shape))
            else:
                axis_4D[idx] = axis
            op.ofm_shapes[idx] = Shape4D(output_shape)

        elif new_axis_mask != 0:
            n = 0
            axis = 0
            while new_axis_mask:
                prev_mask = new_axis_mask
                n += 1
                new_axis_mask &= new_axis_mask - 1  # clear the lowest set bit
                axis = int(math.log2(prev_mask - new_axis_mask))  # index of the cleared bit
                output_shape = output_shape[:axis] + output_shape[(axis + 1) :]
                new_axis_mask >>= 1

            assert len(out_tens.shape) == (len(op.inputs[0].shape) + n)
            op.attrs["new_axis_mask"] = 0
            if axis >= 0:
                axis_4D[idx] = axis + (4 - len(output_shape))
            else:
                axis_4D[idx] = axis
            op.ofm_shapes[idx] = Shape4D(output_shape)

        if op.ofm_shapes[idx] != Shape4D(out_tens.shape):
            out_tens.avoid_NHCWB16 = True

    op.attrs["split_axis_4D"] = axis_4D
    return op


def rewrite_unpack_output(op, arch, nng):
    tens = op.outputs[0]
    if op.run_on_npu and op.type == Op.Unpack:
        # Unpack is also referred to as Unstack
        axis = int(op.attrs["axis"])
        op.type = Op.UnpackReshaped
        desired_output_shape = tens.shape[:axis] + [1] + tens.shape[axis:]

        if axis >= 0:
            axis_4D = axis + (4 - len(desired_output_shape))
        else:
            axis_4D = axis

        axis_4D_list = [0] * len(op.outputs)
        for idx, out_tens in enumerate(op.outputs):
            op.ofm_shapes[idx] = Shape4D(desired_output_shape)
            axis_4D_list[idx] = axis_4D
            if op.ofm_shapes[idx] != Shape4D(out_tens.shape):
                out_tens.avoid_NHCWB16 = True

        op.attrs["split_axis_4D"] = axis_4D_list
    return op


def add_padding_fields(op, arch, nng):
    if op.run_on_npu:
        if "padding" in op.attrs:
            input_shape = op.ifm_shapes[0]
            output_shape = op.ofm_shapes[0]
            if op.type.is_conv2d_op() or op.type.is_depthwise_conv2d_op():
                kernel_size = op.inputs[1].shape[:2]
            elif op.type.is_pool_op() or op.type.npu_block_type == NpuBlockType.ReduceSum:
                kernel_size = op.attrs["ksize"][1:3]
            else:
                raise UnsupportedFeatureError(f"Unknown operation that uses padding: {optype_to_builtintype(op.type)}")

            if op.type == Op.Conv2DBackpropInputSwitchedBias:
                upscaling_factor = output_shape.height // input_shape.height
                padding, skirt = calc_upscaled_padding_and_skirt(
                    op.attrs["padding"], kernel_size, op.attrs["strides"], input_shape, upscaling_factor
                )
            else:
                padding, skirt = calc_padding_and_skirt(
                    op.attrs["padding"], op.kernel, input_shape, op.attrs.get("explicit_padding"),
                )

            op.attrs["explicit_padding"] = padding
            op.attrs["skirt"] = skirt

    return op


def convert_depthwise_to_conv(op, arch, nng):
    # Depthwise is equivalent to a single conv2d if the ifm depth is 1 and
    # the ofm depth equals the depth multiplier.
    # If those conditions are true, then we can perform a simple
    # switch of the operator type (and weight order)

    if op.type == Op.DepthwiseConv2DBias and (op.attrs["depth_multiplier"] != 1):
        ifm_shape = op.ifm_shapes[0]
        weight_tensor = op.inputs[1]
        ofm_shape = op.ofm_shapes[0]
        if (ifm_shape.depth == 1) and (ofm_shape.depth == op.attrs["depth_multiplier"]):
            # Change op type to Conv2d
            op.type = Op.Conv2DBias
            del op.attrs["channel_multiplier"]
            del op.attrs["depth_multiplier"]

            weight_tensor.quant_values = np.transpose(weight_tensor.quant_values, (0, 1, 3, 2))
            weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))
        else:
            raise UnsupportedFeatureError(
                f"Unsupported 'DEPTHWISE_CONV_2D' with depth_multiplier = {op.attrs['depth_multiplier']},"
                f" ifm channels = {ifm_shape.depth}, ofm channels = {ofm_shape.depth}",
            )
        DebugDatabase.add_optimised(op, op)
    return op


def reorder_depthwise_weights(op, arch, nng):
    if op.type.is_depthwise_conv2d_op():
        weight_tensor = op.inputs[1]
        weight_tensor.quant_values = np.transpose(weight_tensor.quant_values, (0, 1, 3, 2))
        weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))
        weight_tensor.weight_transpose_depthwise = True

    return op


def optimise_strided_conv(op, arch, nng):
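    # Rewrites a stride-2 (in x) Conv2D over a shallow IFM to a stride-1 conv by viewing
    # the IFM as half as wide and twice as deep, with the kernel reshaped to match.
    # Illustrative example (arbitrary values, HWIO weight layout assumed): a kernel of
    # shape (3, 3, 3, 8) with stride 2 is padded (with the quantization zero point) to
    # width 4 and reshaped to (3, 2, 6, 8), while a (1, H, W, 3) IFM is viewed as
    # (1, H, W // 2, 6).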
    stride_x, stride_y = op.get_kernel_stride()
    ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()

    if (
        op.type == Op.Conv2DBias
        and op.op_index == 0
        and stride_x == 2
        and op.ifm_shapes[0].depth <= 4
        and op.ifm_shapes[0].width % 2 == 0
        and weight_tensor is not None
        and weight_tensor.shape[1] >= 2
    ):
        ifm_shape = op.ifm_shapes[0]
        # IFM
        op.ifm_shapes[0] = Shape4D([ifm_shape.batch, ifm_shape.height, ifm_shape.width // 2, ifm_shape.depth * 2])
        op.ifm.avoid_NHCWB16 = True

        # Weights
        weight_shape = weight_tensor.shape
        if weight_shape[1] % 2 != 0:
            weight_shape[1] = weight_shape[1] + 1
            padded_array = np.zeros(weight_shape)
            for i in range(weight_shape[0]):
                padded_array[i] = np.vstack(
                    [
                        weight_tensor.quant_values[i],
                        np.full((1, weight_shape[2], weight_shape[3]), weight_tensor.quantization.zero_point),
                    ]
                )
            weight_tensor.quant_values = padded_array
        weight_shape[1] //= 2
        weight_shape[2] *= 2
        weight_tensor.quant_values = np.reshape(weight_tensor.quant_values, weight_shape)
        weight_tensor.set_all_shapes(weight_shape)
        # If multiple copies of the weights are used, we could avoid
        # them having the same address by changing the value_id
        weight_tensor.value_id = uuid.uuid4()

        # Strides
        stride_x = 1
        op.attrs.update({"stride_w": stride_x, "stride_h": stride_y, "strides": (1, stride_y, stride_x, 1)})

    return op


def convert_conv_to_fc(op, arch, nng):
    # Conv 1x1 can be equivalent to Fully Connected.
    # By representing certain convs as fully connected layers, Vela can better determine whether or not to use
    # caching/double buffering for the weights.
    # (Weights don't need to be reloaded for convs when IFM H and W are 1)
    if op.type == Op.Conv2DBias:
        h = op.ifm_shapes[0].height
        w = op.ifm_shapes[0].width
        kh, kw, _, _ = op.inputs[1].shape
        if h == 1 and w == 1 and kh == 1 and kw == 1:
            # Overwrite this op as a Fully Connected Op
            op.name += "_fc"
            op.type = Op.FullyConnected
            op.attrs = {
                "weights_format": 0,
            }
            # Reshape Weights to be 2D. HWIO becomes just IO (as H and W are 1, they can just be dropped)
            weight_tensor = op.inputs[1]
            weight_tensor.quant_values = weight_tensor.quant_values.squeeze(axis=(0, 1))
            weight_tensor.set_all_shapes(list(weight_tensor.quant_values.shape))

            DebugDatabase.add_optimised(op, op)
    return op


def fixup_relus_with_differing_ifm_ofm_scaling(op, arch, nng):
    if op.run_on_npu and op.type.is_relu_op():
        ifm = op.inputs[0]
        ofm = op.outputs[0]
        # Relu with differing IFM and OFM scaling cannot be fused with another primary op
        # and requires its own to be inserted
        if not check_quantized_tens_scaling_equal(ifm, ofm):
            # Override this op with its own primary op (avgpool)
            relu_fused_op = create_avgpool_nop(op.name + "_avgpool")
            # And fuse the original activation function to it
            relu_fused_op.activation = create_activation_function(op.type)
            # Tidy up and assign the ifm and ofm to the new op
            ifm.consumer_list.remove(op)

            relu_fused_op.add_input_tensor(ifm)
            relu_fused_op.set_output_tensor(ofm)
            relu_fused_op.set_ifm_ofm_shapes()
            op = relu_fused_op
    return op


def fixup_elementwise_with_scalars(op, arch, nng):
    if op.type.is_binary_elementwise_op():
        ifm_tensor, ifm2_tensor, _, _ = op.get_ifm_ifm2_weights_ofm()
        if ifm2_tensor.shape != [] and ifm_tensor.shape != []:
            diff = len(ifm_tensor.shape) - len(ifm2_tensor.shape)
            if diff > 0:
                ifm2_tensor.shape = full_shape(len(ifm_tensor.shape), ifm2_tensor.shape, 1)
            elif diff < 0:
                ifm_tensor.shape = full_shape(len(ifm2_tensor.shape), ifm_tensor.shape, 1)
        elif ifm_tensor.shape == [] and ifm_tensor.quant_values is None:
            # IFM is marked as a scalar, but is a result of an operation; change it to a shape of size 1
            ifm_tensor.shape = len(ifm2_tensor.shape) * [1]
            ifm_tensor.storage_shape = ifm_tensor.shape
        elif ifm2_tensor.shape == [] and ifm2_tensor.quant_values is None:
            # IFM2 is marked as a scalar, but is a result of an operation; change it to a shape of size 1
            ifm2_tensor.shape = len(ifm_tensor.shape) * [1]
            ifm2_tensor.storage_shape = ifm2_tensor.shape
    return op


# Set input/output tensor equivalence to the same id for memory operations
def set_tensor_equivalence(op, arch, nng):
    if op.type in memory_only_ops:
        eid = op.outputs[0].equivalence_id
        for inp in op.inputs:
            inp.equivalence_id = eid
    return op


def set_ifm_ofm_op_shapes(op, arch, nng):
    if op.run_on_npu and op.type.needs_shapes():
        if op.ifm_shapes or op.ofm_shapes:
            # Shapes already set
            return op
        op.set_ifm_ofm_shapes()
    return op


def convert_softmax(op, arch, nng):
    if op.type == Op.Softmax and op.run_on_npu:
        softmax = SoftMax(op)
        op = softmax.get_graph()
    return op


def convert_mul_max_to_abs_or_lrelu(op, arch, nng):
    r"""Whenever there is a subgraph with this topology:

       Input    X          For X = -1 or X > 0
       |   \   /           This subgraph can be replaced with either
       |    Mul            an Abs (if X = -1) or a LeakyReLU (if X > 0)
       |   /
       Max
    """
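    # Why the rewrite is valid (illustrative): max(x, alpha * x) = LeakyReLU(x) with
    # slope alpha for 0 <= alpha < 1 (x when x >= 0, alpha * x otherwise), and
    # max(x, -1 * x) = |x|.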
| 786 | |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 787 | if op.type == Op.Maximum: |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 788 | # finds the Mul input(s) to the Max |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 789 | muls = [i for i in op.inputs if i.ops[0].type == Op.Mul] |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 790 | if len(muls) == 1: |
| 791 | mul = muls[0].ops[0] |
| 792 | elif len(muls) == 2: |
| 793 | # In the case both inputs are Muls, find the one with the same input as the Max |
| 794 | mul = [m for m in muls if len(set(op.inputs + m.ops[0].inputs)) == 1][0].ops[0] |
| 795 | else: |
| 796 | # No Mul inputs |
| 797 | return op |
| 798 | |
| 799 | # make sure the Mul doesn't have any other consumers |
Louis Verhaard | d7911c4 | 2020-08-25 13:36:41 +0200 | [diff] [blame] | 800 | mul_ofm = mul.outputs[0] |
| 801 | if len(mul_ofm.consumers()) != 1: |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 802 | return op |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 803 | # make sure the Mul doesn't have a fused activation function |
| 804 | if mul.activation: |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 805 | return op |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 806 | ifm, ofm = op.get_ifm_ofm() |
Tim Hall | 9358296 | 2020-09-09 21:58:15 +0100 | [diff] [blame] | 807 | if ifm is None or ofm is None: |
| 808 | return op |
| 809 | |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 810 | if ifm.dtype not in (DataType.uint8, DataType.int8) or ifm.dtype != ofm.dtype: |
| 811 | return op |
Tim Hall | 9358296 | 2020-09-09 21:58:15 +0100 | [diff] [blame] | 812 | if not check_quantized_tens_scaling_equal(ifm, ofm) or not check_quantized_tens_scaling_equal(ifm, mul_ofm): |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 813 | # rewrite to LeakyRelu currently only makes sense if the quantization is identical |
| 814 | return op |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 815 | |
| 816 | # finds the branched input that goes to both the Max and the Mul |
| 817 | shared = set(op.inputs) & set(mul.inputs) |
| 818 | if len(shared) == 1: |
| 819 | shared_in = shared.pop() |
| 820 | # find the constant scalar input to the Mul |
| 821 | const_tens = (set(mul.inputs) - {shared_in}).pop() |
| 822 | # check that it is a scalar |
| 823 | if const_tens.shape != []: |
| 824 | return op |
| 825 | const = const_tens.ops[0] |
| 826 | # check that it is a constant |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 827 | if const.type != Op.Const: |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 828 | return op |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 829 | # Remove the Mul from the shared input's consumers |
| 830 | shared_in.consumer_list.remove(mul) |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 831 | else: |
| 832 | return op |
| 833 | |
| 834 | val = const.outputs[0].values |
| 835 | if val >= 0: |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 836 | new_op = Op.LeakyRelu |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 837 | op.attrs["alpha"] = val |
Louis Verhaard | d7911c4 | 2020-08-25 13:36:41 +0200 | [diff] [blame] | 838 | # to produce bit-exact results, the alpha value alone is not enough;
 | 839 | # save additional scaling info in attr "alpha_scaling", to be used as input
 | 840 | # to the LUT construction
| 841 | alpha_scalar = const_tens.quant_values - const_tens.quantization.zero_point |
| 842 | mul_ifm_scale = np.double(ifm.quantization.scale_f32) |
| 843 | mul_ifm2_scale = np.double(const_tens.quantization.scale_f32) |
| 844 | mul_ofm_scale = np.double(mul_ofm.quantization.scale_f32) |
| 845 | alpha_scale, alpha_shift = scaling.elementwise_mul_scale(mul_ifm_scale, mul_ifm2_scale, mul_ofm_scale) |
| 846 | op.attrs["alpha_scaling"] = (alpha_scalar, alpha_scale, alpha_shift) |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 847 | elif val == -1: |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 848 | new_op = Op.Abs |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 849 | else: |
| 850 | return op |
| 851 | |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 852 | op.type = new_op |
| 853 | op.name = op.name.replace("Maximum", new_op.name) |
| 854 | op.outputs[0].name = op.outputs[0].name.replace("Maximum", new_op.name) |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 855 | op.inputs = [shared_in] |
Patrik Gustavsson | c509d33 | 2020-12-22 13:53:52 +0100 | [diff] [blame] | 856 | op.set_ifm_ofm_shapes() |
Tim Hall | e6ccd87 | 2020-11-09 16:46:37 +0000 | [diff] [blame] | 857 | |
| 858 | # Record optimisation in debug database |
| 859 | DebugDatabase.add_optimised(op, op) |
| 860 | |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 861 | return op |
| 862 | |
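# A minimal float reference for the rewrite above (illustrative sketch, not
# used by the pass). Assuming 0 <= alpha <= 1, the usual LeakyReLU range,
# max(x, alpha * x) equals LeakyReLU(x) with that alpha; with alpha == -1 it
# equals abs(x).
def _mul_max_rewrite_reference(x: float, alpha: float) -> float:
    # For x >= 0 the maximum is x itself; for x < 0 it is alpha * x
    return max(x, alpha * x)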
| 863 | |
Diqing Zhong | 189f748 | 2021-01-26 12:12:51 +0100 | [diff] [blame] | 864 | def convert_hardswish_to_lut(op, arch, nng): |
| 865 | if op.type == Op.HardSwish: |
| 866 | ifm, ofm = op.get_ifm_ofm() |
| 867 | # Generate the LUT |
| 868 | ifm_scale = np.double(ifm.quantization.scale_f32) |
| 869 | ofm_scale = np.double(ofm.quantization.scale_f32) |
| 870 | zp_in = ifm.quantization.zero_point |
| 871 | zp_out = ofm.quantization.zero_point |
| 872 | ifm_scale_hires = (1 / 128) * ifm_scale |
| 873 | relu_multiplier = np.double(3 / 32768) |
| 874 | out_scale, out_shift = scaling.quantise_scale(ifm_scale_hires / ofm_scale) |
| 875 | relu_scale, relu_shift = scaling.quantise_scale(ifm_scale_hires / relu_multiplier) |
| 876 | # Use 16bit scale |
| 877 | out_scale_16 = fp_math.downscale_multiplier_int32_to_int16(out_scale) |
| 878 | relu_scale_16 = fp_math.downscale_multiplier_int32_to_int16(relu_scale) |
| 879 | |
| 880 | values = [] |
| 881 | ix = range(256) if ifm.dtype == DataType.uint8 else range(-128, 128) |
| 882 | quantized_min = min(ix) |
| 883 | quantized_max = max(ix) |
| 884 | for x in ix: |
| 885 | input_value = x - zp_in |
| 886 | input_value_hires = input_value * 128 |
| 887 | # Compute the input value on essentially the output scale, not shifted yet |
| 888 | input_value_preshift = fp_math.saturating_rounding_mul16(input_value_hires, out_scale_16) |
 | 889 | # Compute the "relu-ish multiplier". This matches the code in the TensorFlow Lite Micro kernel
| 890 | relu_value = np.int16(input_value_hires) |
| 891 | if relu_shift < 31: |
| 892 | relu_value = fp_math.shift_left16(relu_value, 30 - relu_shift) |
| 893 | |
| 894 | relu_value = fp_math.saturating_rounding_mul16(relu_value, relu_scale_16) |
| 895 | |
| 896 | if relu_shift < 31: |
| 897 | relu_value = fp_math.shift_left16(relu_value, 1) |
| 898 | |
| 899 | if relu_shift > 31: |
| 900 | relu_value = fp_math.rounding_divide_by_pot(relu_value, relu_shift - 31) |
| 901 | |
 | 902 | # relu_value is now a 16bit fixedpoint value in [-1, 1];
 | 903 | # convert it to a 16bit fixedpoint value in [0, 1]
| 904 | relu_value = (relu_value + (1 << 15)) >> 1 |
| 905 | lut_result = fp_math.saturating_mul16(relu_value, input_value_preshift) |
| 906 | shift = 31 - out_shift |
| 907 | shift = -shift if shift < 0 else 0 |
| 908 | # Finally apply the output shift |
| 909 | lut_result = fp_math.rounding_divide_by_pot(lut_result, shift) + zp_out |
| 910 | lut_result = min(quantized_max, max(quantized_min, lut_result)) |
| 911 | values.append(lut_result) |
| 912 | return convert_to_lut(op, values, "hardswish") |
| 913 | return op |
| 914 | |
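# Float reference (illustrative sketch, not used by the pass) for the function
# that the fixed-point LUT above approximates: the standard hard-swish
# definition, hardswish(x) = x * relu6(x + 3) / 6.
def _hardswish_float_reference(x: float) -> float:
    # relu6 clamps its argument to the range [0, 6]
    return x * min(max(x + 3.0, 0.0), 6.0) / 6.0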
| 915 | |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 916 | def convert_lrelu_to_mul_max(op, arch): |
| 917 | # Converts LeakyRelu to Max(alpha * IFM, identity * IFM) |
| 918 | # (the opposite of convert_mul_max_to_abs_or_lrelu) |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 919 | ifm, ofm = op.get_ifm_ofm() |
Tim Hall | 9358296 | 2020-09-09 21:58:15 +0100 | [diff] [blame] | 920 | if ifm is None or ofm is None: |
| 921 | return op |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 922 | |
| 923 | # Add multiplication with alpha |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 924 | mul_alpha = Operation(Op.Mul, op.name + "_mul_alpha") |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 925 | mul_alpha.add_input_tensor(ifm) |
| 926 | # Create const tensor containing alpha as scalar |
| 927 | alpha = op.attrs["alpha"] |
| 928 | quantization = ifm.quantization.clone() |
| 929 | quantization.min = 0 |
| 930 | quantization.max = alpha * (quantization.quant_max - quantization.quant_min) |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 931 | quantization.zero_point = 0 |
Louis Verhaard | ece4e65 | 2021-01-07 13:35:47 +0100 | [diff] [blame] | 932 | if np.isinf(1 / np.float32(alpha)): |
| 933 | # Handling of alpha near zero |
| 934 | quantization.scale_f32 = 1 |
| 935 | scalar = 0 |
| 936 | else: |
| 937 | quantization.scale_f32 = alpha |
| 938 | scalar = 1 |
| 939 | alpha_tens = create_const_tensor( |
| 940 | op.name + "_alpha_scalar", [], ifm.dtype, [scalar], np.int8, quantization=quantization |
| 941 | ) |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 942 | mul_alpha.add_input_tensor(alpha_tens) |
| 943 | fm_alpha = ofm.clone(op.name + "_alpha") |
| 944 | mul_alpha.set_output_tensor(fm_alpha) |
patrik.gustavsson | eeb8515 | 2020-12-21 17:10:40 +0000 | [diff] [blame] | 945 | mul_alpha.set_ifm_ofm_shapes() |
Tim Hall | e6ccd87 | 2020-11-09 16:46:37 +0000 | [diff] [blame] | 946 | DebugDatabase.add_optimised(op, mul_alpha) |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 947 | |
Tim Hall | 9358296 | 2020-09-09 21:58:15 +0100 | [diff] [blame] | 948 | if check_quantized_tens_scaling_equal(ifm, ofm): |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 949 | # No identity multiplication is needed |
| 950 | fm_id = ifm |
| 951 | else: |
| 952 | # Add multiplication with identity |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 953 | mul_identity = Operation(Op.Mul, op.name + "_mul_identity") |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 954 | mul_identity.add_input_tensor(ifm) |
| 955 | # Create const tensor containing identity as scalar |
| 956 | quantization = ifm.quantization.clone() |
| 957 | quantization.min = 0 |
| 958 | quantization.max = quantization.quant_max - quantization.quant_min |
| 959 | quantization.scale_f32 = 1 |
| 960 | quantization.zero_point = 0 |
| 961 | identity_tens = create_const_tensor( |
| 962 | op.name + "_id_scalar", [], ifm.dtype, [1], np.uint8, quantization=quantization |
| 963 | ) |
| 964 | mul_identity.add_input_tensor(identity_tens) |
Louis Verhaard | ece4e65 | 2021-01-07 13:35:47 +0100 | [diff] [blame] | 965 | # Make sure that fm_id is allocated to a different address than fm_alpha |
| 966 | fm_id = ofm.clone(op.name + "_id", set_unique=True) |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 967 | mul_identity.set_output_tensor(fm_id) |
patrik.gustavsson | eeb8515 | 2020-12-21 17:10:40 +0000 | [diff] [blame] | 968 | mul_identity.set_ifm_ofm_shapes() |
Patrik Gustavsson | 2349d42 | 2020-12-01 16:02:29 +0100 | [diff] [blame] | 969 | DebugDatabase.add_optimised(op, mul_identity) |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 970 | |
| 971 | # Convert LeakyRelu to Max, add the results of the multiplication(s) as inputs |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 972 | op.type = Op.Maximum |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 973 | op.name = op.name.replace("LeakyRelu", "Maximum") |
| 974 | op.inputs = [] |
| 975 | ifm.consumer_list.remove(op) |
| 976 | op.add_input_tensor(fm_alpha) |
| 977 | op.add_input_tensor(fm_id) |
Patrik Gustavsson | c509d33 | 2020-12-22 13:53:52 +0100 | [diff] [blame] | 978 | op.set_ifm_ofm_shapes() |
Tim Hall | e6ccd87 | 2020-11-09 16:46:37 +0000 | [diff] [blame] | 979 | |
| 980 | DebugDatabase.add_optimised(op, op) |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 981 | return op |
| 982 | |
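# Illustrative sketch (not used by the pass): the alpha constant created above
# carries alpha in its quantization scale (scale_f32 = alpha, integer value 1),
# so dequantizing the scalar yields alpha itself; for alpha near zero the pass
# uses scale 1 with integer value 0 instead, which dequantizes to exactly 0.
def _dequantize_scalar_example(scale_f32: float, quant_value: int, zero_point: int = 0) -> float:
    # Standard affine dequantization: real = scale * (quantized - zero_point)
    return scale_f32 * (quant_value - zero_point)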
| 983 | |
Louis Verhaard | 2e186c7 | 2020-10-09 10:47:04 +0200 | [diff] [blame] | 984 | def convert_to_lut(op, lut_values, lut_name): |
Louis Verhaard | f03bad3 | 2020-09-25 08:30:44 +0200 | [diff] [blame] | 985 | # Rewrite the operation as an Add with scalar 0, plus a LUT activation
| 986 | ifm = op.inputs[0] |
Tim Hall | 9358296 | 2020-09-09 21:58:15 +0100 | [diff] [blame] | 987 | if ifm is None: |
| 988 | return op |
Louis Verhaard | 58520b9 | 2020-08-24 16:45:38 +0200 | [diff] [blame] | 989 | assert ifm.dtype.size_in_bytes() == 1 |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 990 | op.type = Op.Add |
Louis Verhaard | 2e186c7 | 2020-10-09 10:47:04 +0200 | [diff] [blame] | 991 | op.name = op.name + "_lut_" + lut_name |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 992 | # Mark as no-op to enable potential fusing optimizations |
| 993 | op.attrs["is_nop"] = True |
| 994 | # Create an input tensor containing scalar zero |
| 995 | quantization = QuantizationParameters(0.0, 255.0) |
Louis Verhaard | d7911c4 | 2020-08-25 13:36:41 +0200 | [diff] [blame] | 996 | quantization.scale_f32 = ifm.quantization.scale_f32 |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 997 | quantization.zero_point = 0 |
Louis Verhaard | 2e186c7 | 2020-10-09 10:47:04 +0200 | [diff] [blame] | 998 | tens = create_const_tensor(op.inputs[0].name + "_scalar0", [], ifm.dtype, [0], np.uint8, quantization=quantization) |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 999 | op.add_input_tensor(tens) |
patrik.gustavsson | eeb8515 | 2020-12-21 17:10:40 +0000 | [diff] [blame] | 1000 | op.ifm_shapes.append(Shape4D(tens.shape)) |
Patrik Gustavsson | 2349d42 | 2020-12-01 16:02:29 +0100 | [diff] [blame] | 1001 | |
Louis Verhaard | f03bad3 | 2020-09-25 08:30:44 +0200 | [diff] [blame] | 1002 | # The LUT must be applied without any preceding rescaling (the LUT itself performs the rescale), |
| 1003 | # so even if the OFM has a different scale than the IFM, the generated OFM scale instructions |
 | 1004 | # should be the same as the IFM's
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1005 | op.forced_output_quantization = ifm.quantization |
Louis Verhaard | 2e186c7 | 2020-10-09 10:47:04 +0200 | [diff] [blame] | 1006 | lut_tensor = lut.create_lut_tensor(op.name + "_values", lut_values, DataType.int8) |
Louis Verhaard | f03bad3 | 2020-09-25 08:30:44 +0200 | [diff] [blame] | 1007 | op.set_activation_lut(lut_tensor) |
Patrik Gustavsson | c509d33 | 2020-12-22 13:53:52 +0100 | [diff] [blame] | 1008 | op.set_ifm_ofm_shapes() |
Louis Verhaard | f03bad3 | 2020-09-25 08:30:44 +0200 | [diff] [blame] | 1009 | return op |
| 1010 | |
| 1011 | |
Louis Verhaard | 2e186c7 | 2020-10-09 10:47:04 +0200 | [diff] [blame] | 1012 | def convert_to_lut8(op, fn, fn_name): |
Louis Verhaard | f03bad3 | 2020-09-25 08:30:44 +0200 | [diff] [blame] | 1013 | # Converts op to a no-op + int8/uint8 LUT which is generated with the given function.
 | 1014 | # fn is a function mapping a real (dequantized) input value to a real output value
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1015 | ifm, ofm = op.get_ifm_ofm() |
Louis Verhaard | f03bad3 | 2020-09-25 08:30:44 +0200 | [diff] [blame] | 1016 | if ifm.dtype not in (DataType.uint8, DataType.int8) or ifm.dtype != ofm.dtype: |
| 1017 | return op |
| 1018 | # Generate the LUT |
| 1019 | ifm_scale = np.double(ifm.quantization.scale_f32) |
| 1020 | ofm_scale = np.double(ofm.quantization.scale_f32) |
| 1021 | zp_in = ifm.quantization.zero_point |
| 1022 | zp_out = ofm.quantization.zero_point |
| 1023 | values = [] |
| 1024 | ix = range(256) if ifm.dtype == DataType.uint8 else range(-128, 128) |
| 1025 | quantized_min = min(ix) |
| 1026 | quantized_max = max(ix) |
| 1027 | for x in ix: |
| 1028 | x_real = ifm_scale * (x - zp_in) |
| 1029 | y_real = fn(x_real) |
| 1030 | lut_result = round_away_zero(zp_out + y_real / ofm_scale) |
| 1031 | lut_result = min(quantized_max, max(quantized_min, lut_result)) |
| 1032 | values.append(lut_result) |
Louis Verhaard | 2e186c7 | 2020-10-09 10:47:04 +0200 | [diff] [blame] | 1033 | return convert_to_lut(op, values, fn_name) |
Louis Verhaard | f03bad3 | 2020-09-25 08:30:44 +0200 | [diff] [blame] | 1034 | |
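# Worked example (illustrative only) of a single LUT entry, computed the same
# way as in the loop above: with ifm_scale = 0.1, ofm_scale = 0.05,
# zp_in = zp_out = 0 and fn = abs, the entry for x = -3 is
# round_away_zero(0 + abs(0.1 * (-3 - 0)) / 0.05) = round_away_zero(6.0) = 6.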
| 1035 | |
| 1036 | def convert_lrelu_to_lut(op, arch): |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1037 | ifm, ofm = op.get_ifm_ofm() |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1038 | # Generate the LUT |
Louis Verhaard | d7911c4 | 2020-08-25 13:36:41 +0200 | [diff] [blame] | 1039 | alpha = op.attrs["alpha"] |
| 1040 | ifm_scale = np.double(ifm.quantization.scale_f32) |
| 1041 | ofm_scale = np.double(ofm.quantization.scale_f32) |
| 1042 | zp_in = ifm.quantization.zero_point |
| 1043 | zp_out = ofm.quantization.zero_point |
| 1044 | identity_scale, identity_shift = scaling.elementwise_mul_scale(ifm_scale, 1, ofm_scale) |
| 1045 | alpha_scalar = 1 |
| 1046 | alpha_scale, alpha_shift = scaling.elementwise_mul_scale(ifm_scale, alpha, ofm_scale) |
| 1047 | if "alpha_scaling" in op.attrs: |
| 1048 | # The LeakyRelu was the result from convert_mul_max_to_abs_or_lrelu |
| 1049 | alpha_scalar, alpha_scale, alpha_shift = op.attrs["alpha_scaling"] |
| 1050 | values = [] |
Louis Verhaard | 58520b9 | 2020-08-24 16:45:38 +0200 | [diff] [blame] | 1051 | ix = range(256) if ifm.dtype == DataType.uint8 else range(-128, 128) |
Louis Verhaard | d7911c4 | 2020-08-25 13:36:41 +0200 | [diff] [blame] | 1052 | quantized_min = min(ix) |
| 1053 | quantized_max = max(ix) |
| 1054 | for x in ix: |
| 1055 | if x < zp_in: |
| 1056 | lut_result = zp_out + fp_math.multiply_by_quantized_multiplier( |
| 1057 | alpha_scalar * (x - zp_in), alpha_scale, alpha_shift |
| 1058 | ) |
| 1059 | else: |
| 1060 | lut_result = zp_out + fp_math.multiply_by_quantized_multiplier(x - zp_in, identity_scale, identity_shift) |
| 1061 | lut_result = min(quantized_max, max(quantized_min, lut_result)) |
| 1062 | values.append(lut_result) |
Louis Verhaard | 2e186c7 | 2020-10-09 10:47:04 +0200 | [diff] [blame] | 1063 | return convert_to_lut(op, values, "lrelu") |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1064 | |
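# Summary of the table above (illustrative only): ignoring the fixed-point
# multiplier/shift mechanics, each LUT entry implements
#     y = zp_out + round((x - zp_in) * alpha * ifm_scale / ofm_scale)   for x < zp_in
#     y = zp_out + round((x - zp_in) * ifm_scale / ofm_scale)           otherwise
# which is LeakyRelu expressed directly on the quantized values.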
| 1065 | |
Patrik Gustavsson | 3010d9b | 2020-10-01 08:22:10 +0200 | [diff] [blame] | 1066 | def convert_lrelu(op, arch, nng): |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1067 | # Converts LeakyRelu to a LUT based solution if possible, otherwise a mul + max |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1068 | if op.type != Op.LeakyRelu: |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1069 | return op |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1070 | ifm, ofm = op.get_ifm_ofm() |
Tim Hall | 9358296 | 2020-09-09 21:58:15 +0100 | [diff] [blame] | 1071 | if ifm is None or ofm is None: |
| 1072 | return op |
Louis Verhaard | d7911c4 | 2020-08-25 13:36:41 +0200 | [diff] [blame] | 1073 | if ifm.dtype in (DataType.uint8, DataType.int8) and ifm.dtype == ofm.dtype: |
| 1074 | # use LUT for int8/uint8 |
| 1075 | return convert_lrelu_to_lut(op, arch) |
Tim Hall | 9358296 | 2020-09-09 21:58:15 +0100 | [diff] [blame] | 1076 | if check_quantized_tens_scaling_equal(ifm, ofm) and ifm.dtype == ofm.dtype == DataType.int16: |
Louis Verhaard | d7911c4 | 2020-08-25 13:36:41 +0200 | [diff] [blame] | 1077 | # use LeakyRelu unmodified for int16 with equal input/output scaling |
| 1078 | return op |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1079 | return convert_lrelu_to_mul_max(op, arch) |
| 1080 | |
| 1081 | |
Patrik Gustavsson | 3010d9b | 2020-10-01 08:22:10 +0200 | [diff] [blame] | 1082 | def convert_tanh_sigmoid_to_lut(op, arch, nng): |
Louis Verhaard | f03bad3 | 2020-09-25 08:30:44 +0200 | [diff] [blame] | 1083 | # Converts int8/uint8 Sigmoid and Tanh to a LUT based solution |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1084 | if op.type == Op.Sigmoid: |
Louis Verhaard | 2e186c7 | 2020-10-09 10:47:04 +0200 | [diff] [blame] | 1085 | return convert_to_lut8(op, clamp_sigmoid, "sigmoid") |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1086 | elif op.type == Op.Tanh: |
Louis Verhaard | 2e186c7 | 2020-10-09 10:47:04 +0200 | [diff] [blame] | 1087 | return convert_to_lut8(op, math.tanh, "tanh") |
Louis Verhaard | f03bad3 | 2020-09-25 08:30:44 +0200 | [diff] [blame] | 1088 | return op |
| 1089 | |
| 1090 | |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1091 | def remove_reshapes(op, arch): |
| 1092 | if op.run_on_npu and op.type == Op.Reshape: |
| 1093 | ofm = op.ofm |
| 1094 | ifm = op.ifm |
Patrik Gustavsson | fa4cb29 | 2020-09-10 08:19:36 +0200 | [diff] [blame] | 1095 | |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1096 | # Check if quantization is the same in the input and output for the reshape ops |
| 1097 | if not check_quantized_tens_scaling_equal(ifm, ofm): |
| 1098 | # TODO Both tensors are needed, since quantisation properties currently are linked to Tensors. |
 | 1099 | # In order to remove this reshape, either the quantization properties need to be moved to the Operator,
 | 1100 | # or the reshape needs to be replaced with a NOP.
| 1101 | return |
Patrik Gustavsson | fa4cb29 | 2020-09-10 08:19:36 +0200 | [diff] [blame] | 1102 | |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1103 | # Check if Reshape ifm/ofm are network ifm/ofm |
Patrik Gustavsson | 138d47f | 2021-02-08 10:13:48 +0100 | [diff] [blame] | 1104 | ifm_is_sg_ifm = ifm.ops[0].type in (Op.Placeholder, Op.SubgraphInput, Op.Const) |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1105 | ifm_is_sg_ofm = any(ifm_cons is None for ifm_cons in ifm.consumer_list) |
| 1106 | ofm_is_sg_ofm = any(ofm_cons is None for ofm_cons in ofm.consumer_list) |
Patrik Gustavsson | 138d47f | 2021-02-08 10:13:48 +0100 | [diff] [blame] | 1107 | # This case should be handled prior to this function |
| 1108 | assert not ((ifm_is_sg_ifm or ifm_is_sg_ofm) and ofm_is_sg_ofm) |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1109 | |
| 1110 | if ofm_is_sg_ofm: |
| 1111 | # Bypassed by replacing ifm with ofm |
| 1112 | ofm.ops = [] |
| 1113 | for prev_op in ifm.ops: |
| 1114 | prev_op.outputs = [ofm] |
| 1115 | ofm.ops.append(prev_op) |
| 1116 | |
| 1117 | # All ifm consumers need to use ofm as input |
| 1118 | for ifm_cons in ifm.consumer_list: |
| 1119 | for ifm_idx, cons_ifm in enumerate(ifm_cons.inputs): |
| 1120 | if cons_ifm == ifm: |
| 1121 | ifm_cons.set_input_tensor(ofm, ifm_idx) |
| 1122 | if op.ifm_shapes[0] != op.ofm_shapes[0]: |
| 1123 | ofm.avoid_NHCWB16 = True |
| 1124 | else: |
| 1125 | # Bypassed Reshape by replacing ofm with ifm |
| 1126 | for cons in ofm.consumer_list: |
| 1127 | for ifm_idx, cons_ifm in enumerate(cons.inputs): |
| 1128 | if cons_ifm == ofm: |
| 1129 | cons.set_input_tensor(ifm, ifm_idx) |
| 1130 | if op.ifm_shapes[0] != op.ofm_shapes[0]: |
| 1131 | ifm.avoid_NHCWB16 = True |
| 1132 | |
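# Conceptual sketch of the bypass above (illustrative only). A reshape is
# removed by splicing a tensor out of the graph:
#     producer -> ifm -> Reshape -> ofm -> consumers
# becomes producer -> ofm -> consumers when ofm must be kept (subgraph output),
# and producer -> ifm -> consumers otherwise; the surviving tensor is flagged
# to avoid the NHCWB16 format when the 4D ifm/ofm shapes differ.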
| 1133 | |
| 1134 | def check_reshapes(op, arch): |
| 1135 | if op.run_on_npu and op.type == Op.Reshape: |
| 1136 | ofm = op.ofm |
| 1137 | |
| 1138 | if check_quantized_tens_scaling_equal(op.ifm, ofm): |
| 1139 | # Reshape should have been removed |
| 1140 | raise VelaError(f"Reshape op {op} expected to have been removed, still remains") |
Patrik Gustavsson | fa4cb29 | 2020-09-10 08:19:36 +0200 | [diff] [blame] | 1141 | |
| 1142 | |
Patrik Gustavsson | 3010d9b | 2020-10-01 08:22:10 +0200 | [diff] [blame] | 1143 | def fuse_activation_function_with_prev(op, arch, nng): |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1144 | # If op is a no-op: attempt to move its activation function to the preceding op
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1145 | if not op.attrs.get("is_nop", False) or op.activation is None: |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1146 | return op |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1147 | ifm, ofm = op.get_ifm_ofm() |
Tim Hall | 9358296 | 2020-09-09 21:58:15 +0100 | [diff] [blame] | 1148 | if ifm is None or ofm is None: |
| 1149 | return op |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1150 | # finds the input(s) to the operation |
| 1151 | prev_op = ifm.ops[0] |
| 1152 | # Note: the below checks on prev_op require that a first optimize pass on the full graph has been performed |
| 1153 | fuse = ( |
| 1154 | prev_op.run_on_npu |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1155 | and prev_op.type.npu_block_type != NpuBlockType.Default |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1156 | and len(ifm.ops) == 1 |
| 1157 | and len(prev_op.outputs[0].consumers()) == 1 |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1158 | and prev_op.activation is None |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1159 | ) |
| 1160 | if op.activation_lut is not None and arch.shram_reserved_unused_banks == 0: |
| 1161 | # TODO: if SHRAM LUT space is shared with SHRAM ACC (32, 64 MAC), |
| 1162 | # LUT currently only works correctly for elementwise ops |
| 1163 | fuse = False |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1164 | if not fuse: |
| 1165 | return op |
| 1166 | # Move the fused activation function + corresponding info to prev_op |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1167 | prev_op.activation = op.activation |
| 1168 | prev_op.forced_output_quantization = op.forced_output_quantization |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1169 | if op.activation_lut is not None: |
| 1170 | prev_op.set_activation_lut(op.activation_lut) |
| 1171 | # Bypass op |
Louis Verhaard | 98a3499 | 2020-09-01 10:39:04 +0200 | [diff] [blame] | 1172 | prev_op.set_output_tensor(ofm) |
Tim Hall | e6ccd87 | 2020-11-09 16:46:37 +0000 | [diff] [blame] | 1173 | DebugDatabase.add_optimised(op, prev_op) |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1174 | return op |
| 1175 | |
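# Conceptual sketch of the fusion above (illustrative only):
#     prev_op -> ifm -> nop(activation) -> ofm
# becomes
#     prev_op(activation) -> ofm
# which is only safe when prev_op is ifm's sole producer, the no-op is its
# sole consumer, and prev_op has no fused activation of its own.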
| 1176 | |
Louis Verhaard | ae2d553 | 2020-12-11 17:19:54 +0100 | [diff] [blame] | 1177 | def optimise_pad(op, arch, nng): |
| 1178 | """ |
| 1179 | Converts tens1 -> PAD -> tens2 -> CONV to tens1 -> CONV |
| 1180 | if both operations can be run on the NPU. |
| 1181 | """ |
| 1182 | if ( |
| 1183 | (op.type.is_conv2d_op() or op.type.is_depthwise_conv2d_op()) |
| 1184 | and op.run_on_npu |
| 1185 | and op.attrs["padding"] == Padding.VALID |
| 1186 | ): |
| 1187 | pad_op = op.ifm.ops[0] |
| 1188 | if pad_op.type != Op.Pad or not pad_op.run_on_npu: |
| 1189 | return op |
| 1190 | # Bypass the PAD operator |
| 1191 | op.set_input_tensor(pad_op.ifm, 0) |
| 1192 | # Adjust the padding attributes of the convolution operator |
| 1193 | op.attrs["padding"] = Padding.EXPLICIT |
| 1194 | padding = pad_op.inputs[1].values # 4x2 tensor, first dimension is N, H, W, C |
| 1195 | top, left, bottom, right = (padding[1][0], padding[2][0], padding[1][1], padding[2][1]) |
| 1196 | op.attrs["explicit_padding"] = (top, left, bottom, right) |
| 1197 | op.set_ifm_ofm_shapes() |
| 1198 | return op |
| 1199 | |
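# Worked example (illustrative only) of the padding extraction above: a PAD
# input tensor of [[0, 0], [1, 2], [3, 4], [0, 0]] (N, H, W, C) gives
# top, left, bottom, right = (1, 3, 2, 4) as the convolution's explicit padding.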
| 1200 | |
Patrik Gustavsson | 3010d9b | 2020-10-01 08:22:10 +0200 | [diff] [blame] | 1201 | def add_attrs_to_resizebilinear(op, arch, nng): |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1202 | if op.type == Op.ResizeBilinear and op.run_on_npu: |
Dwight Lidman | 42fed94 | 2020-05-29 09:37:03 +0200 | [diff] [blame] | 1203 | input_tensor = op.inputs[0] |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1204 | input_shape = op.ifm_shapes[0] |
| 1205 | upscaled_height = input_shape.height * 2 |
| 1206 | upscaled_width = input_shape.width * 2 |
| 1207 | out_shape = op.ofm_shapes[0] |
| 1208 | if not op.attrs["align_corners"] and out_shape.height == upscaled_height and out_shape.width == upscaled_width: |
Dwight Lidman | 42fed94 | 2020-05-29 09:37:03 +0200 | [diff] [blame] | 1209 | # this means the output is supposed to be a 2x upscale,
 | 1210 | # so we need to do SAME padding
Michael McGeagh | 1689548 | 2020-12-14 15:51:20 +0000 | [diff] [blame] | 1211 | op.attrs["padding"] = Padding.SAME |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1212 | elif ( |
| 1213 | op.attrs["align_corners"] |
| 1214 | and out_shape.height == (upscaled_height - 1) |
| 1215 | and out_shape.width == (upscaled_width - 1) |
| 1216 | ): |
Dwight Lidman | 42fed94 | 2020-05-29 09:37:03 +0200 | [diff] [blame] | 1217 | # here we can just run the avg pool without padding and |
| 1218 | # produce a (M * 2 - 1, N * 2 - 1) sized output |
Michael McGeagh | 1689548 | 2020-12-14 15:51:20 +0000 | [diff] [blame] | 1219 | op.attrs["padding"] = Padding.VALID |
Dwight Lidman | 42fed94 | 2020-05-29 09:37:03 +0200 | [diff] [blame] | 1220 | else: |
Charles Xu | 9a03fdf | 2020-07-02 15:12:40 +0200 | [diff] [blame] | 1221 | return op |
Dwight Lidman | 42fed94 | 2020-05-29 09:37:03 +0200 | [diff] [blame] | 1222 | input_tensor.resampling_mode = resampling_mode.NEAREST |
Tim Hall | c30f495 | 2020-06-15 20:47:35 +0100 | [diff] [blame] | 1223 | op.attrs.update({"strides": (1, 1, 1, 1), "ksize": (1, 2, 2, 1)}) |
Dwight Lidman | 42fed94 | 2020-05-29 09:37:03 +0200 | [diff] [blame] | 1224 | return op |
| 1225 | |
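# Shape examples (illustrative only) for the two supported cases above, using
# a 16x16 IFM: with align_corners=False a 32x32 OFM selects SAME padding, with
# align_corners=True a 31x31 OFM selects VALID padding, and any other output
# shape leaves the op untouched.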
| 1226 | |
Patrik Gustavsson | 3010d9b | 2020-10-01 08:22:10 +0200 | [diff] [blame] | 1227 | def fixup_bias_tensors(op, arch, nng): |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 1228 | if op.type.needs_bias() and op.bias is None: |
Jacob Bohlin | a41cd4d | 2020-08-26 18:21:28 +0200 | [diff] [blame] | 1229 | # Op has no bias, add bias tensor filled with zeros |
| 1230 | nr_biases = op.inputs[1].shape[-1] |
| 1231 | bias_values = [0] * nr_biases |
| 1232 | bias_tensor = create_const_tensor(op.name + "_bias", [nr_biases], DataType.int32, bias_values) |
| 1233 | bias_tensor.quant_values = bias_tensor.values |
| 1234 | op.set_input_tensor(bias_tensor, -1) |
Jacob Bohlin | 67e0d8f | 2020-08-20 10:53:02 +0200 | [diff] [blame] | 1235 | |
| 1236 | return op |
| 1237 | |
| 1238 | |
Patrik Gustavsson | 3010d9b | 2020-10-01 08:22:10 +0200 | [diff] [blame] | 1239 | def supported_operator_check(op, arch, nng): |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 1240 | op.run_on_npu = arch.supported_operators.is_operator_supported(op) |
| 1241 | return op |
| 1242 | |
| 1243 | |
Tim Hall | e6ccd87 | 2020-11-09 16:46:37 +0000 | [diff] [blame] | 1244 | def _record_optimised(op, arch): |
| 1245 | if op.type != Op.Const: |
| 1246 | DebugDatabase.add_optimised(op, op) |
| 1247 | |
| 1248 | |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 1249 | def optimise_graph_a(nng, arch, verbose_graph=False): |
| 1250 | if verbose_graph: |
| 1251 | nng.print_graph() |
| 1252 | |
Patrik Gustavsson | 2349d42 | 2020-12-01 16:02:29 +0100 | [diff] [blame] | 1253 | pre_process_list = [ |
| 1254 | supported_operator_check, |
| 1255 | set_ifm_ofm_op_shapes, |
| 1256 | # TODO: memory-only Op removal |
| 1257 | ] |
| 1258 | |
| 1259 | for idx, sg in enumerate(nng.subgraphs): |
| 1260 | # rewrite graph pass |
| 1261 | nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order( |
| 1262 | nng, sg, arch, [], pre_process_list, rewrite_unsupported=False, |
| 1263 | ) |
| 1264 | |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1265 | # Handle Concat Ops |
| 1266 | for idx, sg in enumerate(nng.subgraphs): |
| 1267 | # rewrite graph pass |
Patrik Gustavsson | 2c2522d | 2021-01-29 11:51:31 +0100 | [diff] [blame] | 1268 | rewrite_graph.visit_graph_post_order(sg.output_tensors, arch, [], [rewrite_concat_ops]) |
| 1269 | sg.refresh_after_modification() |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1270 | |
| 1271 | # Handle Split Ops |
| 1272 | for idx, sg in enumerate(nng.subgraphs): |
| 1273 | # rewrite graph pass |
| 1274 | nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order( |
| 1275 | nng, |
| 1276 | sg, |
| 1277 | arch, |
| 1278 | [], |
| 1279 | [rewrite_unpack_output, rewrite_stridedslice_output, convert_nop_split_to_identity], |
| 1280 | rewrite_unsupported=False, |
| 1281 | ) |
| 1282 | |
| 1283 | for idx, sg in enumerate(nng.subgraphs): |
| 1284 | # rewrite graph pass |
| 1285 | nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order( |
| 1286 | nng, sg, arch, [rewrite_split_ops], [], rewrite_unsupported=False, |
| 1287 | ) |
| 1288 | |
Patrik Gustavsson | 138d47f | 2021-02-08 10:13:48 +0100 | [diff] [blame] | 1289 | # Handle subgraph input/output
| 1290 | for idx, sg in enumerate(nng.subgraphs): |
| 1291 | nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order( |
| 1292 | nng, sg, arch, [], [fix_sg_input_output], rewrite_unsupported=False, |
| 1293 | ) |
| 1294 | |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1295 | # Removal of reshapes |
| 1296 | for sg in nng.subgraphs: |
| 1297 | rewrite_graph.visit_graph_post_order(sg.output_tensors, arch, [], [remove_reshapes]) |
| 1298 | sg.refresh_after_modification() |
| 1299 | |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 1300 | op_rewrite_list = [ |
Tim Hall | 4e12776 | 2020-05-15 16:05:49 +0100 | [diff] [blame] | 1301 | set_tensor_equivalence, |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 1302 | convert_depthwise_to_conv, |
Michael McGeagh | 8d939c0 | 2020-07-29 13:11:43 +0100 | [diff] [blame] | 1303 | convert_conv_to_fc, |
Fredrik Svedberg | a0c3624 | 2020-06-03 15:43:31 +0200 | [diff] [blame] | 1304 | convert_softmax, |
Diqing Zhong | 016b827 | 2020-12-16 16:46:06 +0100 | [diff] [blame] | 1305 | optimise_strided_conv, |
Diqing Zhong | 189f748 | 2021-01-26 12:12:51 +0100 | [diff] [blame] | 1306 | convert_hardswish_to_lut, |
Patrik Gustavsson | 2c2522d | 2021-01-29 11:51:31 +0100 | [diff] [blame] | 1307 | rewrite_fully_connected_input, |
Diqing Zhong | 94457b1 | 2020-12-09 15:22:40 +0100 | [diff] [blame] | 1308 | convert_batched_fc_shape, |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 1309 | fixup_conv2d_backprop, |
Michael McGeagh | 8dbf8cf | 2020-09-08 11:09:48 +0100 | [diff] [blame] | 1310 | fixup_relus_with_differing_ifm_ofm_scaling, |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1311 | fixup_elementwise_with_scalars, # TODO Move to early stage? |
Jacob Bohlin | e843d33 | 2020-06-23 12:12:56 +0200 | [diff] [blame] | 1312 | reorder_depthwise_weights, |
Charles Xu | 9a03fdf | 2020-07-02 15:12:40 +0200 | [diff] [blame] | 1313 | fixup_resizebilinear, |
Jacob Bohlin | a41cd4d | 2020-08-26 18:21:28 +0200 | [diff] [blame] | 1314 | fixup_bias_tensors, |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1315 | convert_mul_max_to_abs_or_lrelu, |
| 1316 | convert_lrelu, |
Louis Verhaard | f03bad3 | 2020-09-25 08:30:44 +0200 | [diff] [blame] | 1317 | convert_tanh_sigmoid_to_lut, |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 1318 | ] |
| 1319 | |
| 1320 | for idx, sg in enumerate(nng.subgraphs): |
| 1321 | # rewrite graph pass |
| 1322 | nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order( |
Dwight Lidman | 73320a4 | 2020-11-05 10:34:41 +0100 | [diff] [blame] | 1323 | nng, sg, arch, [], op_rewrite_list, rewrite_unsupported=False, |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 1324 | ) |
| 1325 | |
| 1326 | for idx, sg in enumerate(nng.subgraphs): |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1327 | # remove passthrough tensors and attempt further optimizations |
| 1328 | nng.subgraphs[idx] = rewrite_graph.rewrite_graph_pre_order( |
Louis Verhaard | ae2d553 | 2020-12-11 17:19:54 +0100 | [diff] [blame] | 1329 | nng, |
| 1330 | sg, |
| 1331 | arch, |
| 1332 | [remove_passthrough_tensor], |
| 1333 | [fuse_activation_function_with_prev, optimise_pad, add_padding_fields], |
Louis Verhaard | b9fc33c | 2020-08-13 11:47:36 +0200 | [diff] [blame] | 1334 | ) |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 1335 | |
Patrik Gustavsson | e3b1b91 | 2021-02-09 15:38:46 +0100 | [diff] [blame] | 1336 | # Removal of SplitSliceRead needs to be done after the optimisations above have been performed,
 | 1337 | # since remove_SplitSliceRead relies on the ifm/ofm_shapes that they set up
| 1338 | for sg in nng.subgraphs: |
| 1339 | rewrite_graph.visit_graph_post_order(sg.output_tensors, arch, [], [remove_SplitSliceRead]) |
| 1340 | sg.refresh_after_modification() |
| 1341 | |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1342 | # Post-optimisation operator debug tracing, and checking that no undesired reshapes are left in the graph |
Tim Hall | e6ccd87 | 2020-11-09 16:46:37 +0000 | [diff] [blame] | 1343 | for sg in nng.subgraphs: |
Patrik Gustavsson | 3a26920 | 2021-01-21 08:28:55 +0100 | [diff] [blame] | 1344 | rewrite_graph.visit_graph_post_order(sg.output_tensors, arch, [], [check_reshapes, _record_optimised]) |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 1345 | |
| 1346 | if verbose_graph: |
| 1347 | nng.print_graph() |
| 1348 | return nng |
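# Typical invocation (illustrative sketch; the exact call site lives in the
# compiler driver, outside this file):
#
#     nng = optimise_graph_a(nng, arch, verbose_graph=False)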