Patrik Gustavsson | 8f1f9aa | 2021-06-28 07:41:58 +0200 | [diff] [blame] | 1 | # Copyright (C) 2021 Arm Limited or its affiliates. All rights reserved. |
| 2 | # |
| 3 | # SPDX-License-Identifier: Apache-2.0 |
| 4 | # |
| 5 | # Licensed under the Apache License, Version 2.0 (the License); you may |
| 6 | # not use this file except in compliance with the License. |
| 7 | # You may obtain a copy of the License at |
| 8 | # |
| 9 | # www.apache.org/licenses/LICENSE-2.0 |
| 10 | # |
| 11 | # Unless required by applicable law or agreed to in writing, software |
| 12 | # distributed under the License is distributed on an AS IS BASIS, WITHOUT |
| 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | # See the License for the specific language governing permissions and |
| 15 | # limitations under the License. |
| 16 | # Description: |
| 17 | # The TosaSupportedOperators class which is a collection of all supported operators and parameter checks. |
| 18 | from collections import defaultdict |
| 19 | |
| 20 | from .data_type import DataType |
| 21 | from .operation import Op |
| 22 | from .supported_operators_util import docstring_format_args |
| 23 | from .supported_operators_util import list_formatter |
| 24 | from .tosa_mapping import optype_to_tosa_op_type |
| 25 | |
| 26 | |
class TosaSupportedOperators:
    """Collection of the TOSA operators the NPU supports, plus the generic and
    per-operator parameter checks used to decide whether a given op can run
    on the NPU.

    `is_operator_supported(op)` is the single public entry point; everything
    else is constraint data/checks it drives. Each constraint is a callable
    taking the op and returning `(valid, extra)`, where `extra` is detail
    text printed when `valid` is False. The constraint's docstring is the
    user-facing description of the rule, so every constraint must have one.
    """

    # TODO currently sparsely populated
    # Categorised lists of supported operators
    convolution_ops = set((Op.Conv2DBias,))
    depthwise_convolution_ops = set((Op.DepthwiseConv2DBias,))
    convolution_like_ops = convolution_ops | depthwise_convolution_ops

    # TODO depending on what will be committed
    max_pooling_ops = Op.op_set(Op.is_maxpool_op)
    avg_pooling_ops = Op.op_set(Op.is_avgpool_op)
    pooling_ops = max_pooling_ops | avg_pooling_ops
    fc_vector_products = set((Op.FullyConnected,))

    mac_main_ops = convolution_like_ops | pooling_ops | fc_vector_products
    memory_only_ops = set((Op.Reshape, Op.Transpose, Op.Concat, Op.SplitSliceRead,))
    binary_elem_wise_add_mul_sub = set((Op.Add, Op.Mul, Op.RescaleMul, Op.Sub,))
    type_conversion_ops = set((Op.Rescale,))
    relu_ops = set((Op.Clamp, Op.ReluN,))
    activation_ops = relu_ops
    pad_ops = set((Op.Pad,))

    npu_post_ops = activation_ops
    # Union of every category above: the full set of op types the NPU handles
    supported_operators = (
        mac_main_ops | type_conversion_ops | npu_post_ops | memory_only_ops | binary_elem_wise_add_mul_sub | pad_ops
    )

    # Supported data types
    # TODO will differ compared to TensorFlow Lite, currently set to the same
    supported_op_dtypes = set((DataType.uint8, DataType.int8, DataType.int16, DataType.int32))  # TODO add bool
    tens_dim_range = (1, 65535)  # TODO HW limitation, that is to be resolved in SW

    def __init__(self):
        # Setup the generic constraints. Note: the order matters
        self.generic_constraints = []
        self.generic_constraints.append(TosaSupportedOperators.constraint_tens_dtype)
        self.generic_constraints.append(TosaSupportedOperators.constraint_tens_dimension)  # TODO as not supported yet
        self.generic_constraints.append(TosaSupportedOperators.constraint_rank)  # TODO as not supported yet
        self.generic_constraints.append(TosaSupportedOperators.constraint_batch)  # TODO as not supported yet

        # Setup specific constraints. Note: the order matters
        self.specific_constraints = defaultdict(list)

        self.specific_constraints[Op.Transpose].append(TosaSupportedOperators.constraint_ifm_producer)
        self.specific_constraints[Op.Pad].append(TosaSupportedOperators.constraint_padding_producer)

        # Depthwise Conv specific checks:
        for op_type in TosaSupportedOperators.depthwise_convolution_ops:
            self.specific_constraints[op_type].append(TosaSupportedOperators.constraint_depth_multiplier)

        # Avgpool specific checks
        for op_type in TosaSupportedOperators.avg_pooling_ops:
            self.specific_constraints[op_type].append(TosaSupportedOperators.constraint_padding)

    def is_operator_supported(self, op):
        """Return True if `op` can be placed on the NPU.

        Prints an informational/warning message (including the failing
        constraint's docstring and detail text) when the op is rejected.
        """
        ext_type = optype_to_tosa_op_type(op.type)
        if op.type not in TosaSupportedOperators.supported_operators:
            # Graph plumbing ops are silently CPU-resident; anything else is reported
            if op.type not in (Op.Placeholder, Op.SubgraphInput, Op.Const):
                print(f"Info: {ext_type} '{op.name}' is not a NPU op")
            return False

        # Generic constraints run first, then op-type-specific ones; first failure wins
        for constraint in self.generic_constraints + self.specific_constraints[op.type]:
            valid, extra = constraint(op)
            if not valid:
                print(f"Warning: {ext_type} '{op.name}' is not supported on the NPU")
                print(f" - {constraint.__doc__}")
                if extra:
                    print(f"   {extra}")
                return False

        return True

    # TODO this function is the same for TensorFlow Lite, but input might differ
    @classmethod
    @docstring_format_args([list_formatter(supported_op_dtypes)])
    def constraint_tens_dtype(cls, op):
        "Tensors must be of type: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        if not tensors:
            # Fall back to raw inputs for ops without the ifm/ofm convenience accessors
            tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if tens.dtype not in cls.supported_op_dtypes:
                valid = False
                extra.append(f"Tensor '{tens.name}' has data type: {tens.dtype}")
        return valid, ", ".join(extra)

    # TODO Duplicates check present for TFLite. But it is only temporarily added
    # This is for a HW limitation, that is to be resolved in SW later on
    @classmethod
    @docstring_format_args(tens_dim_range)
    def constraint_tens_dimension(cls, op):
        "Tensor dimensions must be in the range [{}, {}]"
        tens_min, tens_max = cls.tens_dim_range
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        if not tensors:
            tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if not all(tens_min <= dim <= tens_max for dim in tens.shape):
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    # TODO This is for a HW limitation, that is to be resolved in SW later on
    @staticmethod
    def constraint_rank(op):
        "Tensor rank must be <= 4"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        if not tensors:
            tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            rank = len(tens.shape)
            if not rank <= 4:
                valid = False
                extra.append(f"Tensor '{tens.name}' has rank: {rank}")
        return valid, ", ".join(extra)

    # TODO This is for a HW limitation, that is to be resolved in SW later on
    @staticmethod
    def constraint_batch(op):
        "If Tensor rank is 4 batch of ifms/ofm must be 1"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_ofm() if tens]
        if not tensors:
            tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            rank = len(tens.shape)
            if rank == 4 and tens.shape[0] != 1:
                valid = False
                extra.append(f"Tensor '{tens.name}' has rank: 4 and N: {tens.shape[0]}")
        return valid, ", ".join(extra)

    # BUGFIX: was "@staticmethod def constraint_ifm_producer(cls, op)"; constraints
    # are invoked as constraint(op), so the op bound to `cls` and the call raised
    # TypeError. The spurious `cls` parameter is removed.
    @staticmethod
    def constraint_ifm_producer(op):
        "Input must be constant data"
        valid = op.ifm.ops and op.ifm.ops[0].type == Op.Const
        return valid, "Op has ifm with non-constant data"

    @staticmethod
    def constraint_padding(op):
        # TODO Only support for when global scaling can be used.
        # That is when there is no padding.
        "Avgpool only supported for no padding"
        top, left, _, _ = op.attrs["explicit_padding"]
        valid = top == 0 and left == 0

        # BUGFIX: was a plain string; the {top}/{left} placeholders were never
        # interpolated in the diagnostic output
        return valid, f"Avgpool with pad_top {top} and pad_left {left}"

    # TODO limit padding to be const data for now.
    # For TFLite it is assumed to be constant.
    @staticmethod
    def constraint_padding_producer(op):
        "Input must be constant data"
        # inputs[1] is the padding tensor of the PAD op
        valid = op.inputs[1].ops and op.inputs[1].ops[0].type == Op.Const
        return valid, "PAD Op with non-constant data padding"

    # TODO duplicates tflite_supported operators, but support for depth multiplier should be added at a later stage
    @staticmethod
    def constraint_depth_multiplier(op):
        "For depth multipliers > 1, IFM channels must be 1 and OFM channels must be equal to the depth multiplier"
        depth_multiplier = op.attrs.get("depth_multiplier", 1)
        if depth_multiplier > 1:
            ifm_channels = op.ifm.shape[3]
            ofm_channels = op.ofm.shape[3]
            valid = (ifm_channels == 1) and (ofm_channels == depth_multiplier)
            extra = (
                f"Op has ifm_channels={ifm_channels}, ofm_channels={ofm_channels}"
                f" and depth_multiplier={depth_multiplier}"
            )
            return valid, extra
        return True, "Op has depth_multiplier=1"