blob: 3b39ce6ff909a24958de324093c48d8d4b12c136 [file] [log] [blame]
Rickard Bolinbc6ee582022-11-04 08:24:29 +00001# SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
Patrik Gustavsson8f1f9aa2021-06-28 07:41:58 +02002#
3# SPDX-License-Identifier: Apache-2.0
4#
5# Licensed under the Apache License, Version 2.0 (the License); you may
6# not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an AS IS BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
Rickard Bolinbc6ee582022-11-04 08:24:29 +000016#
Patrik Gustavsson8f1f9aa2021-06-28 07:41:58 +020017# Description:
18# The TosaSupportedOperators class which is a collection of all supported operators and parameter checks.
19from collections import defaultdict
20
21from .data_type import DataType
22from .operation import Op
23from .supported_operators_util import docstring_format_args
24from .supported_operators_util import list_formatter
25from .tosa_mapping import optype_to_tosa_op_type
26
27
class TosaSupportedOperators:
    # Collection of all TOSA operators supported by the NPU, together with the
    # generic and operator-specific constraint checks used to decide whether a
    # given op can be placed on the NPU.
    #
    # NOTE: the single-line docstrings on the constraint_* methods are
    # user-facing: is_operator_supported() prints constraint.__doc__ verbatim
    # when a check fails, so they must remain short one-line descriptions.

    # TODO currently sparsely populated
    # Categorised lists of supported operators
    convolution_ops = set((Op.Conv2DBias,))
    depthwise_convolution_ops = set((Op.DepthwiseConv2DBias,))
    convolution_like_ops = convolution_ops | depthwise_convolution_ops

    # TODO depending on what will be committed
    max_pooling_ops = Op.op_set(Op.is_maxpool_op)
    avg_pooling_ops = Op.op_set(Op.is_avgpool_op)
    pooling_ops = max_pooling_ops | avg_pooling_ops
    fc_vector_products = set((Op.FullyConnected,))

    mac_main_ops = convolution_like_ops | pooling_ops | fc_vector_products
    memory_only_ops = set(
        (
            Op.Reshape,
            Op.Transpose,
            Op.Concat,
            Op.SplitSliceRead,
        )
    )
    binary_elem_wise_add_mul_sub = set(
        (
            Op.Add,
            Op.Mul,
            Op.Sub,
        )
    )
    elem_wise_ops = binary_elem_wise_add_mul_sub
    type_conversion_ops = set((Op.Rescale,))
    relu_ops = set(
        (
            Op.Clamp,
            Op.ReluN,
        )
    )
    activation_ops = relu_ops | set((Op.Table,))
    pad_ops = set((Op.Pad,))

    # Ops exempt from one or more of the rank/batch/tensor-dimension checks
    # below (see constraint_rank, constraint_batch, constraint_tens_dimension).
    rank_unlimited_ops = set((Op.Concat, Op.Reshape, Op.Identity, Op.Pad))
    rank6_limited_ops = elem_wise_ops
    batch_enabled_ops = rank6_limited_ops | rank_unlimited_ops
    large_tens_dims_enabled_ops = batch_enabled_ops | set((Op.SplitSliceRead,))
    npu_post_ops = activation_ops

    supported_operators = (
        mac_main_ops
        | type_conversion_ops
        | npu_post_ops
        | memory_only_ops
        | elem_wise_ops
        | pad_ops
        | set((Op.Identity,))
    )

    # Supported data types
    # TODO will differ compared to TensorFlow Lite, currently set to the same
    supported_op_dtypes = set((DataType.uint8, DataType.int8, DataType.int16, DataType.int32))  # TODO add bool
    tens_dim_range = (1, 65535)  # TODO HW limitation, that is to be resolved in SW

    def __init__(self):
        # Setup the generic constraints. Note: the order matters
        self.generic_constraints = []
        self.generic_constraints.append(TosaSupportedOperators.constraint_tens_dtype)
        self.generic_constraints.append(TosaSupportedOperators.constraint_tens_dimension)  # TODO not supported yet
        self.generic_constraints.append(TosaSupportedOperators.constraint_rank)  # TODO not supported for all ops yet
        self.generic_constraints.append(TosaSupportedOperators.constraint_batch)  # TODO not supported for all ops yet

        # Setup generic constraint exceptions
        self.generic_constraints_exceptions = defaultdict(list)

        # Setup specific constraints. Note: the order matters
        self.specific_constraints = defaultdict(list)

        self.specific_constraints[Op.Transpose].append(TosaSupportedOperators.constraint_ifm_producer)
        self.specific_constraints[Op.Pad].append(TosaSupportedOperators.constraint_padding_producer)
        self.specific_constraints[Op.Table].append(TosaSupportedOperators.constraint_table_dtype)
        self.specific_constraints[Op.Table].append(TosaSupportedOperators.constraint_table_producer)

        # Depthwise Conv specific checks:
        for op_type in TosaSupportedOperators.depthwise_convolution_ops:
            self.specific_constraints[op_type].append(TosaSupportedOperators.constraint_depth_multiplier)

        # Avgpool specific checks
        for op_type in TosaSupportedOperators.avg_pooling_ops:
            self.specific_constraints[op_type].append(TosaSupportedOperators.constraint_padding)

    def is_operator_supported(self, op):
        # Return True if op passes all generic and op-specific constraints;
        # otherwise print the failing constraint's docstring (and any extra
        # detail) and return False.
        ext_type = optype_to_tosa_op_type(op.type)
        if op.type not in TosaSupportedOperators.supported_operators:
            # Placeholder/input/const ops are not NPU ops but are expected,
            # so only report genuinely unsupported operator types.
            if op.type not in (Op.Placeholder, Op.SubgraphInput, Op.Const):
                print(f"Info: {ext_type} '{op.name}' is not a NPU op")
            return False

        for constraint in self.generic_constraints + self.specific_constraints[op.type]:
            valid, extra = constraint(op)
            if not valid:
                print(f"Warning: {ext_type} '{op.name}' is not supported on the NPU")
                print(f" - {constraint.__doc__}")
                if extra:
                    print(f"   {extra}")
                return False

        return True

    # TODO this function is the same for TensorFlow Lite, but input might differ
    @classmethod
    @docstring_format_args([list_formatter(supported_op_dtypes)])
    def constraint_tens_dtype(cls, op):
        "Tensors must be of type: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        if not tensors:
            # Fall back to the raw input list for ops without ifm/ofm accessors
            tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if tens.dtype not in cls.supported_op_dtypes:
                valid = False
                extra.append(f"Tensor '{tens.name}' has data type: {tens.dtype}")
        return valid, ", ".join(extra)

    # TODO Duplicates check present for TFLite. But it is only temporarily added
    # This is for a HW limitation, that is to be resolved in SW later on
    @classmethod
    @docstring_format_args(tens_dim_range)
    def constraint_tens_dimension(cls, op):  # first parameter renamed self -> cls to match the @classmethod decorator
        "Tensor dimensions must be in the range [{}, {}]"
        tens_min, tens_max = cls.tens_dim_range
        valid = True
        extra = []
        if op.type not in cls.large_tens_dims_enabled_ops:
            tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
            if not tensors:
                tensors = [tens for tens in op.inputs if tens]
            for tens in tensors:
                if not all(tens_min <= dim <= tens_max for dim in tens.shape):
                    valid = False
                    extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    # TODO This is for a HW limitation, that is to be resolved in SW later on
    @classmethod
    def constraint_rank(cls, op):  # first parameter renamed self -> cls to match the @classmethod decorator
        "Tensor rank must be <= 6 or <= 4 depending on operator"
        valid = True
        extra = []
        if op.type not in cls.rank_unlimited_ops:
            # Elementwise ops allow rank up to 6, everything else up to 4
            if op.type in cls.rank6_limited_ops:
                rank_limit = 6
            else:
                rank_limit = 4
            tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
            if not tensors:
                tensors = [tens for tens in op.inputs if tens]
            for tens in tensors:
                rank = len(tens.shape)
                if not rank <= rank_limit:
                    valid = False
                    extra.append(
                        f"Tensor '{tens.name}' has rank: {rank}, rank limit is currently {rank_limit}"
                        f" for op of type {op.type}"
                    )
        return valid, ", ".join(extra)

    # TODO This is for a HW limitation, that is to be resolved in SW later on
    @classmethod
    def constraint_batch(cls, op):  # first parameter renamed self -> cls to match the @classmethod decorator
        "If Tensor rank is 4 batch of ifms/ofm must be 1"
        valid = True
        extra = []
        if op.type not in cls.batch_enabled_ops:
            tensors = [tens for tens in op.get_ifm_ifm2_ofm() if tens]
            if not tensors:
                tensors = [tens for tens in op.inputs if tens]
            for tens in tensors:
                rank = len(tens.shape)
                if rank == 4 and tens.shape[0] != 1:
                    valid = False
                    extra.append(f"Tensor '{tens.name}' has rank: 4 and N: {tens.shape[0]}")
        return valid, ", ".join(extra)

    # BUG FIX: this was a @staticmethod declared as (cls, op); invoking it as
    # constraint(op) from is_operator_supported raised TypeError (missing
    # 'op'). The spurious cls parameter has been dropped.
    @staticmethod
    def constraint_ifm_producer(op):
        "Input must be constant data"
        valid = op.ifm.ops and op.ifm.ops[0].type == Op.Const
        return valid, "Op has ifm with non-constant data"

    @staticmethod
    def constraint_padding(op):
        # TODO Only support for when global scaling can be used.
        # That is when there is no padding
        "Avgpool only supported for no padding"
        top, left, _, _ = op.attrs["explicit_padding"]
        valid = top == 0 and left == 0

        # BUG FIX: the f prefix was missing, so "{top}"/"{left}" were printed literally
        return valid, f"Avgpool with pad_top {top} and pad_left {left}"

    # TODO limit padding to be const data for now.
    # For TFLite it is assumed to be constant.
    @staticmethod
    def constraint_padding_producer(op):
        "Input must be constant data"
        # inputs[1] is the padding tensor of the PAD op
        valid = op.inputs[1].ops and op.inputs[1].ops[0].type == Op.Const
        return valid, "PAD Op with non-constant data padding"

    # TODO duplicates tflite_supported operators, but support for depth multiplier should be added at a later stage
    @staticmethod
    def constraint_depth_multiplier(op):
        "For depth multipliers > 1, IFM channels must be 1 and OFM channels must be equal to the depth multiplier"
        depth_multiplier = op.attrs.get("depth_multiplier", 1)
        if depth_multiplier > 1:
            ifm_channels = op.ifm.shape[3]
            ofm_channels = op.ofm.shape[3]
            valid = (ifm_channels == 1) and (ofm_channels == depth_multiplier)
            extra = (
                f"Op has ifm_channels={ifm_channels}, ofm_channels={ofm_channels}"
                f" and depth_multiplier={depth_multiplier}"
            )
            return valid, extra
        return True, "Op has depth_multiplier=1"

    # TODO Table operator support limited to int8 for now.
    # For TFLite it is assumed to be constant.
    @staticmethod
    def constraint_table_dtype(op):
        "Only supported is int8"
        valid = True
        # Check ifm, ofm and the table tensor (inputs[1]) are all int8
        tensors = [op.ifm, op.ofm, op.inputs[1]]
        for tens in tensors:
            if tens.dtype != DataType.int8:
                valid = False
        return valid, "Table operator with non int8 tensor"

    # TODO limit table to be constant data for now.
    # Can it be non-constant?
    @staticmethod
    def constraint_table_producer(op):
        "Input must be constant data"
        # inputs[1] is the lookup-table tensor of the TABLE op
        valid = op.inputs[1].ops and op.inputs[1].ops[0].type == Op.Const
        return valid, "Table Op with non-constant table input"