# Copyright (C) 2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The TosaSupportedOperators class, which is a collection of all supported operators and parameter checks.
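#
# A minimal usage sketch (assuming `op` is an operation with its type and tensors populated):
#     supported = TosaSupportedOperators()
#     if supported.is_operator_supported(op):
#         ...  # op passed all generic and specific checks and may be mapped to the NPU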
from collections import defaultdict

from .data_type import DataType
from .operation import Op
from .supported_operators_util import docstring_format_args
from .supported_operators_util import list_formatter
from .tosa_mapping import optype_to_tosa_op_type


class TosaSupportedOperators:
    # TODO currently sparsely populated
    # Categorised lists of supported operators
    convolution_ops = set((Op.Conv2DBias,))
    depthwise_convolution_ops = set((Op.DepthwiseConv2DBias,))
    convolution_like_ops = convolution_ops | depthwise_convolution_ops

    # TODO depending on what will be committed
    max_pooling_ops = Op.op_set(Op.is_maxpool_op)
    avg_pooling_ops = Op.op_set(Op.is_avgpool_op)
    pooling_ops = max_pooling_ops | avg_pooling_ops
    fc_vector_products = set((Op.FullyConnected,))

    mac_main_ops = convolution_like_ops | pooling_ops | fc_vector_products
    memory_only_ops = set((Op.Reshape, Op.Transpose, Op.Concat, Op.SplitSliceRead,))
    binary_elem_wise_add_mul_sub = set((Op.Add, Op.Mul, Op.RescaleMul, Op.Sub,))
    elem_wise_ops = binary_elem_wise_add_mul_sub
    type_conversion_ops = set((Op.Rescale,))
    relu_ops = set((Op.Clamp, Op.ReluN,))
    activation_ops = relu_ops | set((Op.Table,))
    pad_ops = set((Op.Pad,))

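    # Op sets that relax the generic rank/batch/tensor-dimension checks further below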
    rank_unlimited_ops = set((Op.Concat,))
    rank6_limited_ops = elem_wise_ops
    batch_enabled_ops = elem_wise_ops | set((Op.Concat,))
    large_tens_dims_enabled_ops = elem_wise_ops | set((Op.Concat,))
    npu_post_ops = activation_ops

    supported_operators = mac_main_ops | type_conversion_ops | npu_post_ops | memory_only_ops | elem_wise_ops | pad_ops

    # Supported data types
    # TODO will differ compared to TensorFlow Lite, currently set to the same
    supported_op_dtypes = set((DataType.uint8, DataType.int8, DataType.int16, DataType.int32))  # TODO add bool
    tens_dim_range = (1, 65535)  # TODO HW limitation, that is to be resolved in SW

    def __init__(self):
        # Setup the generic constraints. Note: the order matters
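        # Each constraint is a callable taking the op and returning (valid, extra);
        # the constraint's docstring is printed as the reason when a check fails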
        self.generic_constraints = []
        self.generic_constraints.append(TosaSupportedOperators.constraint_tens_dtype)
        self.generic_constraints.append(TosaSupportedOperators.constraint_tens_dimension)  # TODO as not supported yet
        self.generic_constraints.append(TosaSupportedOperators.constraint_rank)  # TODO as not supported for all ops yet
        self.generic_constraints.append(
            TosaSupportedOperators.constraint_batch
        )  # TODO as not supported for all ops yet

        # Setup specific constraints. Note: the order matters
        self.specific_constraints = defaultdict(list)

        self.specific_constraints[Op.Transpose].append(TosaSupportedOperators.constraint_ifm_producer)
        self.specific_constraints[Op.Pad].append(TosaSupportedOperators.constraint_padding_producer)
        self.specific_constraints[Op.Table].append(TosaSupportedOperators.constraint_table_dtype)
        self.specific_constraints[Op.Table].append(TosaSupportedOperators.constraint_table_producer)

        # Depthwise Conv specific checks:
        for op_type in TosaSupportedOperators.depthwise_convolution_ops:
            self.specific_constraints[op_type].append(TosaSupportedOperators.constraint_depth_multiplier)

        # Avgpool specific checks
        for op_type in TosaSupportedOperators.avg_pooling_ops:
            self.specific_constraints[op_type].append(TosaSupportedOperators.constraint_padding)

    def is_operator_supported(self, op):
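        """Return True if op passes all generic and type-specific constraints; print the reason when it does not."""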
        ext_type = optype_to_tosa_op_type(op.type)
        if op.type not in TosaSupportedOperators.supported_operators:
            if op.type not in (Op.Placeholder, Op.SubgraphInput, Op.Const):
                print(f"Info: {ext_type} '{op.name}' is not an NPU op")
            return False

        for constraint in self.generic_constraints + self.specific_constraints[op.type]:
            valid, extra = constraint(op)
            if not valid:
                print(f"Warning: {ext_type} '{op.name}' is not supported on the NPU")
                print(f" - {constraint.__doc__}")
                if extra:
                    print(f"   {extra}")
                return False

        return True

    # TODO this function is the same for TensorFlow Lite, but input might differ
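    # docstring_format_args fills the {} placeholder in the constraint docstring below,
    # keeping the printed message in sync with supported_op_dtypes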
    @classmethod
    @docstring_format_args([list_formatter(supported_op_dtypes)])
    def constraint_tens_dtype(cls, op):
        "Tensors must be of type: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        if not tensors:
            tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if tens.dtype not in cls.supported_op_dtypes:
                valid = False
                extra.append(f"Tensor '{tens.name}' has data type: {tens.dtype}")
        return valid, ", ".join(extra)

    # TODO Duplicates a check present for TFLite, but it is only temporarily added
    # This is for a HW limitation, that is to be resolved in SW later on
    @classmethod
    @docstring_format_args(tens_dim_range)
    def constraint_tens_dimension(cls, op):
        "Tensor dimensions must be in the range [{}, {}]"
        tens_min, tens_max = cls.tens_dim_range
        valid = True
        extra = []
        if op.type not in cls.large_tens_dims_enabled_ops:
            tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
            if not tensors:
                tensors = [tens for tens in op.inputs if tens]
            for tens in tensors:
                if not all(tens_min <= dim <= tens_max for dim in tens.shape):
                    valid = False
                    extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    # TODO This is for a HW limitation, that is to be resolved in SW later on
    @classmethod
    def constraint_rank(cls, op):
        "Tensor rank must be <= 6 or <= 4 depending on operator"
        valid = True
        extra = []
        if op.type not in cls.rank_unlimited_ops:
            if op.type in cls.rank6_limited_ops:
                rank_limit = 6
            else:
                rank_limit = 4
            tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
            if not tensors:
                tensors = [tens for tens in op.inputs if tens]
            for tens in tensors:
                rank = len(tens.shape)
                if rank > rank_limit:
                    valid = False
                    extra.append(f"Tensor '{tens.name}' has rank: {rank}")
        return valid, ", ".join(extra)

    # TODO This is for a HW limitation, that is to be resolved in SW later on
    @classmethod
    def constraint_batch(cls, op):
        "If tensor rank is 4, the batch of ifms/ofm must be 1"
        valid = True
        extra = []
        if op.type not in cls.batch_enabled_ops:
            tensors = [tens for tens in op.get_ifm_ifm2_ofm() if tens]
            if not tensors:
                tensors = [tens for tens in op.inputs if tens]
            for tens in tensors:
                rank = len(tens.shape)
                if rank == 4 and tens.shape[0] != 1:
                    valid = False
                    extra.append(f"Tensor '{tens.name}' has rank: 4 and N: {tens.shape[0]}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_ifm_producer(op):
        "Input must be constant data"
        valid = op.ifm.ops and op.ifm.ops[0].type == Op.Const
        return valid, "Op has ifm with non-constant data"

    @staticmethod
    def constraint_padding(op):
        # TODO Only support for when global scaling can be used.
        # That is, when there is no padding
        "Avgpool only supported for no padding"
        top, left, _, _ = op.attrs["explicit_padding"]
        valid = top == 0 and left == 0

        return valid, f"Avgpool with pad_top {top} and pad_left {left}"

    # TODO limit padding to be const data for now.
    # For TFLite it is assumed to be constant.
    @staticmethod
    def constraint_padding_producer(op):
        "Input must be constant data"
        valid = op.inputs[1].ops and op.inputs[1].ops[0].type == Op.Const
        return valid, "PAD Op with non-constant data padding"

    # TODO duplicates tflite_supported_operators, but support for depth multiplier should be added at a later stage
    @staticmethod
    def constraint_depth_multiplier(op):
        "For depth multipliers > 1, IFM channels must be 1 and OFM channels must be equal to the depth multiplier"
        depth_multiplier = op.attrs.get("depth_multiplier", 1)
        if depth_multiplier > 1:
            ifm_channels = op.ifm.shape[3]
            ofm_channels = op.ofm.shape[3]
            valid = (ifm_channels == 1) and (ofm_channels == depth_multiplier)
            extra = (
                f"Op has ifm_channels={ifm_channels}, ofm_channels={ofm_channels}"
                f" and depth_multiplier={depth_multiplier}"
            )
            return valid, extra
        return True, "Op has depth_multiplier=1"

    # TODO Table operator support is limited to int8 for now.
    # For TFLite it is assumed to be constant.
    @staticmethod
    def constraint_table_dtype(op):
        "Only int8 is supported"
        valid = True
        tensors = [op.ifm, op.ofm, op.inputs[1]]
        for tens in tensors:
            if tens.dtype != DataType.int8:
                valid = False
        return valid, "Table operator with non-int8 tensor"

    # TODO limit table to be constant data for now.
    # Can it be non-constant?
    @staticmethod
    def constraint_table_producer(op):
        "Input must be constant data"
        valid = op.inputs[1].ops and op.inputs[1].ops[0].type == Op.Const
        return valid, "Table Op with non-constant table input"
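

# A sketch of how a further specific check could be added, following the pattern above
# (constraint_example and Op.Example are hypothetical, not part of this module):
#
#     @staticmethod
#     def constraint_example(op):
#         "Docstring printed verbatim as the warning when the check fails"
#         valid = ...  # inspect op.attrs, op.ifm, op.ofm as needed
#         return valid, "extra detail appended to the warning"
#
# registered in __init__ with:
#     self.specific_constraints[Op.Example].append(TosaSupportedOperators.constraint_example)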