# Copyright (C) 2021-2022 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Common functions and definitions used during the graph optimization.
from typing import Tuple

import numpy as np

from . import lut
from .architecture_features import Accelerator
from .data_type import DataType
from .debug_database import DebugDatabase
from .errors import UnsupportedFeatureError
from .errors import VelaError
from .operation import Op
from .operation_util import create_avgpool_nop
from .shape4d import Shape4D
from .tensor import create_const_tensor
from .tensor import QuantizationParameters

memory_only_ops = (
    Op.Reshape,
    Op.QuantizedReshape,
    Op.Squeeze,
    Op.ExpandDims,
    Op.Identity,
)


def _avoid_nhcwb16_for_concat(tens):
    # If axis corresponds to C-dimension, NHCWB16 can only be used in the output if all the concat_start's are a
    # multiple of 16. This is because only then will the address offset for the ofm, for all operations, be 16 byte
    # aligned. For other values of axis the address offsets will be 16 byte aligned, as they are all based on c = 0
    # and those addresses are always 16 byte aligned due to the NHCWB16 format.
    return any(op.write_offset.depth % 16 != 0 for op in tens.ops if op.write_offset is not None)
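# Illustration of the rule above (hypothetical depths, not taken from any particular network): concatenating
# tensors of depth 24 and 40 along C gives the second input a write offset of 24 into the ofm; 24 % 16 != 0, so
# the ofm must stay in linear (NHWC) format. With depths of e.g. 32 and 48 every write offset is a multiple of
# 16 and NHCWB16 can be kept.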


def _avoid_nhcwb16_for_split(tens):
    # If the read offset is not a multiple of 16 in the C-dimension, NHCWB16 needs to be avoided in the input

    # Return True if NHCWB16 needs to be avoided
    def offset_not_aligned(read_offset):
        return read_offset is not None and (read_offset.depth % 16) != 0

    for cons_op in tens.consumer_list:
        if cons_op.ifm == tens:
            if offset_not_aligned(cons_op.read_offsets[0]):
                return True
        if cons_op.ifm2 is not None and cons_op.ifm2 == tens:
            if offset_not_aligned(cons_op.read_offsets[1]):
                return True
    return False


def _avoid_nhcwb16_for_shapes(tens):
    # check all producers/consumers to see if any op shape is preventing NHCWB16
    for cons_op in tens.consumer_list:
        if cons_op.ifm == tens:
            cons_op_shape = cons_op.ifm_shapes[0]
        elif cons_op.type.is_binary_elementwise_op() and cons_op.ifm2 == tens:
            cons_op_shape = cons_op.ifm_shapes[1]
        else:
            assert False
        if Shape4D(tens.shape) != cons_op_shape:
            return True

    for prod_op in tens.ops:
        if Shape4D(tens.shape) != prod_op.ofm_shapes[0]:
            return True

    return False


# Check if non linear format can be used
def check_format_restrictions(tens, arch):
    if len(tens.ops) < 1:
        return
    if tens.ops[0].type in (Op.Placeholder, Op.SubgraphInput, Op.Const) or any(
        cons is None for cons in tens.consumer_list
    ):
        return

    # Check if any of the producers/consumers is run on CPU
    if not all(cons.run_on_npu for cons in tens.consumer_list):
        return
    if not all(prod.run_on_npu for prod in tens.ops):
        return

    # "Concat" ofm exception:
    if _avoid_nhcwb16_for_concat(tens):
        return

    # "Split" ifm exception:
    if _avoid_nhcwb16_for_split(tens):
        return

    # Shapes checking: check all producers/consumers are NHCWB16 compatible with tens.shape
    if _avoid_nhcwb16_for_shapes(tens):
        return

    # Resize bilinear half pixel center implementation requires OFM with linear format to
    # allow stride modification in H/W dimensions.
    for op in tens.ops:
        if op.original_type == Op.ResizeBilinear and op.type == Op.DepthwiseConv2DBias:
            return

    for op in tens.consumer_list:
        if op.type == Op.ReduceSum and (
            tens.dtype == DataType.int32 or arch.accelerator_config == Accelerator.Ethos_U65_512
        ):
            # ReduceSum requires NHWC input
            return
        if op.type == Op.Reshape:
            # Using NHCWB16 format for a no-op reshape is only an option if subsequent
            # consumers do not also need to perform a reshape or if the OFM is going to
            # be processed by CPU operations. No-op reshape consumers with empty lists
            # (those that have no consumers, or null-consumers used as list terminators)
            # must use normal NHWC output.

            def incompatible_consumers(oper):
                if oper and oper.type == Op.Reshape:
                    for consumer in oper.outputs[0].consumer_list:
                        yield from incompatible_consumers(consumer)
                yield not oper or not oper.run_on_npu

            if not any(incompatible_consumers(op)):

                def get_rewrites(oper):
                    if oper and oper.type == Op.Reshape:
                        for consumer in oper.outputs[0].consumer_list:
                            yield from get_rewrites(consumer)
                        yield oper

                # Detect no-op reshapes by comparing their full input and output tensor shapes.
                inshape = op.ifm_shapes[0]
                compatible_shape = [(inshape == oper.ofm_shapes[0]) for oper in get_rewrites(op)]
                if not (compatible_shape and all(compatible_shape)):
                    return
            else:
                return

    tens.needs_linear_format = False


def calc_explicit_padding(input_size, stride, filter_size, pad_before, pad_after) -> Tuple[int, int]:
    """
    Based on explicit padding provided in a PAD operation, returns the corresponding hardware padding
    that provides equivalent results.
    """
    total_padding = needed_total_padding(input_size, stride, filter_size)

    # The bottom/right padding might need downward adjustment depending on stride/input size
    total_minus_before = total_padding - pad_before
    output_pad_after = pad_after
    while output_pad_after > 0 and output_pad_after % stride != total_minus_before % stride:
        output_pad_after -= 1
    return pad_before, output_pad_after
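# Worked example of the adjustment above (hypothetical values, doctest-style for illustration only):
#   >>> calc_explicit_padding(8, 2, 3, pad_before=1, pad_after=1)
#   (1, 0)
# needed_total_padding() gives 1, so total_minus_before is 0 and pad_after is stepped down from 1 to 0;
# the dropped bottom/right padding would never have been read by the kernel anyway.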


def needed_total_padding(input_size, stride, filter_size):
    out_size = (input_size + stride - 1) // stride
    needed_input = (out_size - 1) * stride + filter_size
    total_padding = max(0, needed_input - input_size)
    return total_padding
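# Worked example (hypothetical values, doctest-style for illustration only):
#   >>> needed_total_padding(7, 2, 3)
#   2
# out_size = (7 + 1) // 2 = 4 and needed_input = (4 - 1) * 2 + 3 = 9, i.e. 2 elements of padding, which matches
# SAME padding for those parameters.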


# Set input/output tensor equivalence to the same id for memory operations
def set_tensor_equivalence(op, arch, nng):
    if op.type in memory_only_ops:
        eid = op.outputs[0].equivalence_id
        for inp in op.inputs:
            inp.equivalence_id = eid
    return op


def set_ifm_ofm_op_shapes(op, arch, nng):
    if op.run_on_npu and op.type.needs_shapes():
        if op.ifm_shapes or op.ofm_shapes:
            # Shapes already set
            return op
        op.set_ifm_ofm_shapes()
    return op


def bypass_memory_only_ops(op):
    assert op.type in memory_only_ops
    ofm = op.ofm
    ifm = op.ifm

    # Check if ifm/ofm are network ifm/ofm
    ifm_is_sg_ifm = ifm.ops[0].type in (Op.Placeholder, Op.SubgraphInput, Op.Const)
    ifm_is_sg_ofm = any(ifm_cons is None for ifm_cons in ifm.consumer_list)
    ofm_is_sg_ofm = any(ofm_cons is None for ofm_cons in ofm.consumer_list)
    # Check if the ifm/ofm is produced or consumed by the CPU, respectively
    ifm_is_cpu_produced = any(ifm_prod is not None and not ifm_prod.run_on_npu for ifm_prod in op.ifm.ops)
    ofm_is_cpu_consumed = any(ofm_cons is not None and not ofm_cons.run_on_npu for ofm_cons in op.ofm.consumer_list)

    # This case should be handled prior to this function
    assert not ((ifm_is_sg_ifm or ifm_is_sg_ofm or ifm_is_cpu_produced) and (ofm_is_sg_ofm or ofm_is_cpu_consumed))

    if ofm_is_sg_ofm or ofm_is_cpu_consumed:
        # Bypass by replacing ifm with ofm
        ofm.ops = []
        for prev_op in ifm.ops:
            prev_op.outputs = [ofm]
            ofm.ops.append(prev_op)

        # All ifm consumers need to use ofm as input
        for ifm_cons in ifm.consumer_list:
            for ifm_idx, cons_ifm in enumerate(ifm_cons.inputs):
                if cons_ifm == ifm:
                    ifm_cons.set_input_tensor(ofm, ifm_idx)
    else:
        # Bypass by replacing ofm with ifm
        for cons in ofm.consumer_list:
            for ifm_idx, cons_ifm in enumerate(cons.inputs):
                if cons_ifm == ofm:
                    cons.set_input_tensor(ifm, ifm_idx)
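# Sketch of the two bypass directions above (illustrative, not an exhaustive description): for A -> Reshape -> B,
# if the Reshape ofm is a subgraph output or CPU-consumed, A is rewired to write straight into the ofm tensor;
# otherwise B (and every other consumer) is rewired to read straight from the ifm tensor. Either way the memory
# only op can afterwards be removed without a copy.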


def move_splitsliceread_to_consumer(op, cons_op):
    assert op.type == Op.SplitSliceRead

    if cons_op.ifm == op.ofm:
        cons_op.read_offsets[0] = op.read_offsets[0]
        cons_op.read_shapes[0] = op.read_shapes[0]
        cons_op.set_input_tensor(op.ifm, cons_op.type.info.indices.ifms[0])
        cons_op.ifm_shapes[0] = op.ifm_shapes[0]
    elif cons_op.type.is_binary_elementwise_op() and cons_op.ifm2 == op.ofm:
        cons_op.read_offsets[1] = op.read_offsets[0]
        cons_op.read_shapes[1] = op.read_shapes[0]
        cons_op.set_input_tensor(op.ifm, cons_op.type.info.indices.ifms[1])
        cons_op.ifm_shapes[1] = op.ifm_shapes[0]

    op.ofm.consumer_list.remove(cons_op)
    op.ofm.ops = []
    op.ifm.consumer_list.remove(op)
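# Note (descriptive only): after this move the consumer, e.g. a convolution fed by a SplitSliceRead, performs the
# offset read directly from the original ifm, and the intermediate SplitSliceRead ofm is left without a producer
# so the SplitSliceRead op can be dropped from the graph.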


def check_memory_only_removed(op, arch):
    if op.run_on_npu and op.type in memory_only_ops:
        # Memory only operators should have been removed
        raise VelaError(f"Memory only {op.type} op {op} expected to have been removed, still remains")


def record_optimised(op, arch):
    if op.type != Op.Const:
        DebugDatabase.add_optimised(op, op)


def insert_copy_op_after_tens(tens):
    tens_cons_list_copy = tens.consumer_list.copy()

    # Create an avg_pool nop op with ifm as input
    copy_tens = tens.clone()
    copy_op = create_avgpool_nop(tens.name + "_avgpool")
    copy_op.add_input_tensor(tens)
    copy_op.set_output_tensor(copy_tens)
    copy_op.set_ifm_ofm_shapes()
    copy_op.run_on_npu = True

    # Set copy_ifm consumers
    for tens_cons in tens_cons_list_copy:
        if tens_cons is not None:
            for ifm_idx, cons_inp in enumerate(tens_cons.inputs):
                if cons_inp == tens:
                    tens_cons.set_input_tensor(copy_tens, ifm_idx)

    DebugDatabase.add_optimised(tens.ops[0], copy_op)


def fix_sg_input_output(op, arch, nng):
    if not op.run_on_npu or op.type not in memory_only_ops:
        return op

    # For the memory only operators we want to remove, the tensors are removed as well.
    # But in order to do this, they cannot be outputs of the subgraph; this needs to be
    # fixed prior to the removal. The solution is to add an avgpool NOP to maintain the
    # original tensor. This is also valid when the reshape ifm/ofm is produced or
    # consumed by the CPU, respectively.

    # Check if operator ifm/ofm are sg ifm/ofm
    ifm_is_sg_ifm = op.ifm.ops[0].type in (Op.Placeholder, Op.SubgraphInput, Op.Const)
    ifm_is_sg_ofm = any(ifm_cons is None for ifm_cons in op.ifm.consumer_list)
    ofm_is_sg_ofm = any(ofm_cons is None for ofm_cons in op.ofm.consumer_list)
    # Check if the ifm/ofm is produced or consumed by the CPU, respectively
    ifm_is_cpu_produced = any(ifm_prod is not None and not ifm_prod.run_on_npu for ifm_prod in op.ifm.ops)
    ofm_is_cpu_consumed = any(ofm_cons is not None and not ofm_cons.run_on_npu for ofm_cons in op.ofm.consumer_list)

    if (ifm_is_sg_ofm or ifm_is_sg_ifm or ifm_is_cpu_produced) and (ofm_is_sg_ofm or ofm_is_cpu_consumed):
        # Both ifm and ofm need to persist, but only the ifm needs a copy in order to remove the memory only operator.
        insert_copy_op_after_tens(op.ifm)

    return op


def convert_depthwise_to_conv(op, arch, nng):
    # Depthwise is equivalent to a single conv2d if the ifm depth is 1 and
    # the ofm depth equals the depth multiplier.
    # If those conditions are true, then we can perform a simple
    # switch of the operator type (and weight order)

    if op.type == Op.DepthwiseConv2DBias and (op.attrs["depth_multiplier"] != 1):
        ifm_shape = op.ifm_shapes[0]
        weight_tensor = op.inputs[1]
        ofm_shape = op.ofm_shapes[0]
        if (ifm_shape.depth == 1) and (ofm_shape.depth == op.attrs["depth_multiplier"]):
            # Change op type to Conv2d
            op.type = Op.Conv2DBias
            del op.attrs["channel_multiplier"]
            del op.attrs["depth_multiplier"]

            weight_tensor.values = np.transpose(weight_tensor.values, (0, 1, 3, 2))
            weight_tensor.set_all_shapes(list(weight_tensor.values.shape))
        else:
            raise UnsupportedFeatureError(
                f"Unsupported 'DEPTHWISE_CONV_2D' with depth_multiplier = {op.attrs['depth_multiplier']},",
                f" ifm channels = {ifm_shape.depth}, ofm channels = {ofm_shape.depth}",
            )
        DebugDatabase.add_optimised(op, op)
    return op
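# Example of when the rewrite above applies (illustrative numbers only): a DEPTHWISE_CONV_2D with
# depth_multiplier=8 on an ifm of shape 1x32x32x1 produces an ofm of depth 8; since the ifm depth is 1 and the
# ofm depth equals the multiplier, the op can be expressed as a regular CONV_2D with 8 output channels once the
# weights are reordered.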


def convert_to_lut(op, lut_values, lut_name):
    # Rewrite the operation as an Add with scalar 0 plus a LUT activation
    ifm = op.inputs[0]
    if ifm is None:
        return op
    assert ifm.dtype.size_in_bytes() == 1
    op.type = Op.Add
    op.name = op.name + "_lut_" + lut_name
    # Mark as no-op to enable potential fusing optimizations
    op.attrs["is_nop"] = True
    # Create an input tensor containing scalar zero
    quantization = QuantizationParameters(0.0, 255.0)
    quantization.scale_f32 = ifm.quantization.scale_f32
    quantization.zero_point = 0
    tens = create_const_tensor(op.inputs[0].name + "_scalar0", [], ifm.dtype, [0], np.uint8, quantization=quantization)
    op.add_input_tensor(tens)
    op.ifm_shapes.append(Shape4D(tens.shape))  # TODO no shape?

    # The LUT must be applied without any preceding rescaling (the LUT itself performs the rescale),
    # so even if the OFM has a different scale than the IFM, the generated OFM scale instructions
    # should be the same as the IFM
    op.forced_output_quantization = ifm.quantization
    lut_tensor = lut.create_lut_tensor(op.name + "_values", lut_values, DataType.int8)
    op.set_activation_lut(lut_tensor)
    op.set_ifm_ofm_shapes()
    return op
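# Minimal usage sketch (hypothetical caller; the values and op are not taken from any specific pass): an int8
# activation such as tanh could be lowered by precomputing its 256 quantized output values and calling
#     convert_to_lut(op, tanh_lut_values, "tanh")
# leaving an elementwise Add of the ifm with a scalar zero whose activation is the LUT; the rescale is then
# performed entirely by the table (see forced_output_quantization above).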