# SPDX-FileCopyrightText: Copyright 2021-2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# The TFLiteSemantic class, which is a collection of TensorFlow Lite model semantic checks.
from collections import defaultdict

import numpy as np

from .data_type import BaseType
from .data_type import DataType
from .numeric_util import is_integer
from .operation import Op
from .supported_operators_util import docstring_format_args
from .supported_operators_util import list_formatter
from .tensor import check_quantized_tens_scaling_equal
from .tensor import shape_num_elements
from .tflite_mapping import BUILTIN_OPERATOR_UNKNOWN
from .tflite_mapping import optype_to_builtintype


def _optype_formatter(op_list):
    # Convert internal op types to external names
    output = map(optype_to_builtintype, op_list)
    # Remove UNKNOWNs
    output = (x for x in output if x is not BUILTIN_OPERATOR_UNKNOWN)
    return list_formatter(output)


class TFLiteSemantic:
    # Categorised lists of operators
    convolution_ops = set(
        (
            Op.Conv2DBias,
            Op.Conv2D,
            Op.QuantizedConv2D,
        )
    )
    depthwise_convolution_ops = set((Op.DepthwiseConv2DBias,))
    transpose_convolution_ops = set((Op.Conv2DBackpropInput,))
    convolution_like_ops = convolution_ops | depthwise_convolution_ops | transpose_convolution_ops
    max_pooling_ops = Op.op_set(Op.is_maxpool_op)
    avg_pooling_ops = Op.op_set(Op.is_avgpool_op)
    pooling_ops = set((Op.ReduceSum,)) | max_pooling_ops | avg_pooling_ops
    unary_elem_wise_main_ops = Op.op_set(Op.is_unary_elementwise_op)
    binary_elem_wise_min_max_ops = set(
        (
            Op.Minimum,
            Op.Maximum,
        )
    )
    binary_elem_wise_shift_ops = set(
        (
            Op.SHL,
            Op.SHR,
        )
    )
    binary_elem_wise_add_mul_sub = set(
        (
            Op.Add,
            Op.Mul,
            Op.Sub,
        )
    )
    binary_elem_wise_main_ops = binary_elem_wise_min_max_ops | binary_elem_wise_add_mul_sub | binary_elem_wise_shift_ops
    elem_wise_main_ops = binary_elem_wise_main_ops | unary_elem_wise_main_ops | set((Op.SquaredDifference,))
    shapeless_input_ops = binary_elem_wise_main_ops | set(
        (Op.Split, Op.SplitV, Op.Mean, Op.ExpandDims, Op.Quantize, Op.ArgMax)
    )
    reshape_ops = set(
        (
            Op.Reshape,
            Op.QuantizedReshape,
            Op.Squeeze,
            Op.ExpandDims,
        )
    )

    def __init__(self):
        # Set up the generic constraints. Note: the order matters
        self.generic_constraints = []
        self.generic_constraints.append(TFLiteSemantic.constraint_attributes_specified)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_no_dynamic)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_defined_shape)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_output_scalar)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_input_scalar)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_shape_size)

        self.generic_constraints.append(TFLiteSemantic.constraint_tens_quant_none_check)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_quant_scale)
        self.generic_constraints.append(TFLiteSemantic.constraint_quant_scale_inf)
        self.generic_constraints.append(TFLiteSemantic.constraint_none_const_tensors)

        # Set up the specific constraints. Note: the order matters
        self.specific_constraints = defaultdict(list)

        # Conv-like checks:
        for op_type in TFLiteSemantic.convolution_like_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_stride_type)
            if op_type in TFLiteSemantic.convolution_ops:
                # Only Conv has groups
                self.specific_constraints[op_type].append(TFLiteSemantic.constraint_conv_groups_ifm_depth)
                self.specific_constraints[op_type].append(TFLiteSemantic.constraint_conv_groups_num_filters)
            if op_type not in TFLiteSemantic.transpose_convolution_ops:
                # Transpose Conv has no dilation attribute
                self.specific_constraints[op_type].append(TFLiteSemantic.constraint_dilation_type)

        # Pooling checks:
        for op_type in TFLiteSemantic.pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_stride_type)
        # AVG pooling specific checks:
        for op_type in TFLiteSemantic.avg_pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_filter_type)
        # MAX pooling specific checks:
        for op_type in TFLiteSemantic.max_pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_filter_type)

        # Concat specific checks:
        for op_type in (Op.Concat, Op.ConcatTFLite):
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_axis_exists)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_axis_valid)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_dimensionality)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_valid_dimensions)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_valid_dimensions_axis)

        # Element-wise checks:
        for op_type in TFLiteSemantic.elem_wise_main_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_either_shapes)
        # Unary specific checks:
        for op_type in TFLiteSemantic.unary_elem_wise_main_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Binary Min/Max specific checks:
        for op_type in TFLiteSemantic.binary_elem_wise_min_max_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Binary Add/Mul/Sub specific checks:
        for op_type in TFLiteSemantic.binary_elem_wise_add_mul_sub:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_inputs_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_signed)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_unsigned_valid)

        # Ops reshaping dimensions: Reshape, Squeeze and ExpandDims
        for op_type in TFLiteSemantic.reshape_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_quant)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_elements)

        # Softmax specific checks:
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_matching_shapes)
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_matching_in_out_types)
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_beta_value_range)

        # Split specific checks:
        self.specific_constraints[Op.Split].append(TFLiteSemantic.constraint_split_axis)
        self.specific_constraints[Op.Split].append(TFLiteSemantic.constraint_split_num_splits)

        # SplitV specific checks:
        self.specific_constraints[Op.SplitV].append(TFLiteSemantic.constraint_splitv_inferred)

        # StridedSlice specific checks:
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_stridedslice_input_count)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_stridedslice_inputs_const)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_ellipsis_mask)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_axis_masks)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_slice_ranges)

        # FullyConnected specific checks:
        self.specific_constraints[Op.FullyConnected].append(TFLiteSemantic.constraint_fc_output_2d)
        self.specific_constraints[Op.FullyConnected].append(TFLiteSemantic.constraint_keep_dim_ifm_ofm)

        # Pad specific checks:
        self.specific_constraints[Op.Pad].append(TFLiteSemantic.constraint_pad_input_count)
        self.specific_constraints[Op.Pad].append(TFLiteSemantic.constraint_pad_constant)
        self.specific_constraints[Op.Pad].append(TFLiteSemantic.constraint_pad_output_shape)

        # HardSwish specific checks:
        self.specific_constraints[Op.HardSwish].append(TFLiteSemantic.constraint_input_8bit)
        self.specific_constraints[Op.HardSwish].append(TFLiteSemantic.constraint_matching_in_out_types)

        # Mean specific checks:
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_mean_input_dims)
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_mean_axis)

        # ArgMax specific checks:
        self.specific_constraints[Op.ArgMax].append(TFLiteSemantic.constraint_input_8bit)
        self.specific_constraints[Op.ArgMax].append(TFLiteSemantic.constraint_argmax_output)

        # UnidirectionalSequenceLstm specific checks:
        self.specific_constraints[Op.UnidirectionalSequenceLstm].append(TFLiteSemantic.constraint_input_signed)
        self.specific_constraints[Op.UnidirectionalSequenceLstm].append(TFLiteSemantic.constraint_matching_in_out_types)
        self.specific_constraints[Op.UnidirectionalSequenceLstm].append(TFLiteSemantic.constraint_lstm_dimensions)
        self.specific_constraints[Op.UnidirectionalSequenceLstm].append(TFLiteSemantic.constraint_lstm_inputs)
        self.specific_constraints[Op.UnidirectionalSequenceLstm].append(TFLiteSemantic.constraint_lstm_intermediates)
        self.specific_constraints[Op.UnidirectionalSequenceLstm].append(TFLiteSemantic.constraint_lstm_variables)

        # Exp specific checks:
        self.specific_constraints[Op.Exp].append(TFLiteSemantic.constraint_input_signed)

        # Transpose specific checks:
        self.specific_constraints[Op.Transpose].append(TFLiteSemantic.constraint_transpose_permutation_size)
        self.specific_constraints[Op.Transpose].append(TFLiteSemantic.constraint_transpose_permutation_values)

    def is_operator_semantic_valid(self, op):
        ext_type = optype_to_builtintype(op.type)

        if op.type in (Op.Placeholder, Op.SubgraphInput, Op.Const):
            return True

        # Filter the generic constraints to exclude any that do not apply to this op type
        filtered_generic_constraints = []

        for constraint in self.generic_constraints:
            # Skip constraints listed in the exclude list for this op type (a missing key gives an empty list)
            if constraint not in self.get_generic_constraint_exclude_list().get(op.type, []):
                filtered_generic_constraints.append(constraint)

        for constraint in filtered_generic_constraints + self.specific_constraints[op.type]:
            valid, extra = constraint(op)
            if not valid:
                print(
                    f"Warning: Unsupported TensorFlow Lite semantics for {ext_type} '{op.name}'. Placing on CPU instead"
                )
                print(f" - {constraint.__doc__}")
                if extra:
                    print(f" {extra}")
                return False

        return True

    @staticmethod
    def get_generic_constraint_exclude_list():

        # Not all generic constraints can be applied to each operator
        generic_constraints_exclude_list = {
            Op.Shape: [
                TFLiteSemantic.constraint_tens_quant_none_check,
            ],
            Op.Quantize: [
                TFLiteSemantic.constraint_tens_no_dynamic,
                TFLiteSemantic.constraint_tens_output_scalar,
            ],
            Op.ArgMax: [
                TFLiteSemantic.constraint_tens_quant_none_check,
            ],
            Op.Transpose: [
                TFLiteSemantic.constraint_tens_quant_none_check,
            ],
        }
        return generic_constraints_exclude_list

    @staticmethod
    def constraint_none_const_tensors(op):
        "Constant tensors should not have NoneType-values"
        valid = True
        extra = ""
        for tens in filter(None, op.inputs):
            if len(tens.ops) > 0 and tens.ops[0].type == Op.Const and tens.values is None:
                valid = False
                extra = str(tens.name)
        return valid, f"Unexpected None value for constant tensor: {extra}"

    @staticmethod
    def constraint_attributes_specified(op):
        "All required operator attributes must be specified"
        # operators that have been created internally (i.e. not created as part of reading an input network) may not
        # have the read error attribute
        attribute_read_error = op.attrs.get("attribute_read_error", [])
        valid = len(attribute_read_error) == 0
        extra = ", ".join(attribute_read_error)
        return valid, f"Op has missing attributes: {extra}"

    @staticmethod
    def constraint_tens_no_dynamic(op):
        "Input(s) and Output tensors must not be dynamic"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (tens.values is None):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has dynamic tensor(s): {extra}"

    @staticmethod
    def constraint_tens_defined_shape(op):
        "Input(s) and Output tensors must have a defined shape"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if not tens.has_fully_defined_shape():
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_output_scalar(op):
        "Output tensors cannot be scalar"
        ofm = op.ofm
        valid = ofm.shape != []
        return valid, f"Output Tensor '{ofm.name}' is scalar"

    @classmethod
    @docstring_format_args([_optype_formatter(shapeless_input_ops)])
    def constraint_tens_input_scalar(cls, op):
        "Scalar Input tensors are only valid for op type: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (op.type not in cls.shapeless_input_ops):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has scalar input tensor(s): {extra}"

    @staticmethod
    def constraint_tens_shape_size(op):
        "Input(s) and Output tensors must not be greater than 4D"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if len(tens.shape) > 4:
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_quant_none_check(op):
        "Input(s), Output and Weight tensors must have quantization parameters"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if tens.quantization is None:
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has tensors with missing quantization parameters: {extra}"

    @staticmethod
    def constraint_tens_quant_scale(op):
        "Input(s), Output and Weight tensors with quantization scales must be finite"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if (
                tens.quantization
                and tens.quantization.scale_f32 is not None
                and np.isinf(tens.quantization.scale_f32).any()
            ):
                valid = False
                extra.append(f"Tensor '{tens.name}' has quantization scale: {tens.quantization.scale_f32}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_fc_output_2d(op):
        """The output tensor(s) must have 2D shape"""
        valid = op.ifm.get_shape_as_2d(op.weights.shape[-2]) is not None
        extra = f"Op has non-2D output tensor '{op.ofm.name}'" if not valid else ""

        return valid, extra

    @staticmethod
    def constraint_stride_type(op):
        "Stride values for both width and height must be integer types"
        w, h = op.get_kernel_stride()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has stride WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_conv_groups_ifm_depth(op):
        """IFM depth must be a whole multiple of the filter kernel depth"""
        ifm_depth = op.ifm.shape[-1]  # nhwc
        kernel_ic = op.weights.shape[-2]  # hwio
        num_conv_groups = ifm_depth // kernel_ic
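        # Illustrative example (hypothetical values): ifm_depth=8 with kernel_ic=2 gives
        # num_conv_groups=4 and passes the whole-multiple check below; ifm_depth=9 with
        # kernel_ic=2 would fail it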

        if ifm_depth % kernel_ic == 0:
            op.attrs["num_conv_groups"] = num_conv_groups
            valid = True
        else:
            valid = False

        return valid, f"IFM depth = {ifm_depth} and filter kernel depth = {kernel_ic}"

    @staticmethod
    def constraint_conv_groups_num_filters(op):
        """Number of filter kernels must be equally divisible by the number of convolution groups"""
        ifm_depth = op.ifm.shape[-1]  # nhwc
        kernel_ic = op.weights.shape[-2]  # hwio
        kernel_oc = op.weights.shape[-1]  # hwio
        num_conv_groups = ifm_depth // kernel_ic

        if kernel_oc % num_conv_groups == 0:
            valid = True
        else:
            valid = False

        return valid, f"Filter kernels = {kernel_oc} and convolution groups = {num_conv_groups}"

    @staticmethod
    def constraint_dilation_type(op):
        "Dilation factor values for both width and height must be integer types"
        w, h = op.get_kernel_dilation()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has dilation factor WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_quant_scale_inf(op):
        "Input and Output tensors must have quantization scales that fit within float32 precision"
        ofm_scale = None
        if op.ofm is not None and op.ofm.is_quantized():
            ofm_scale = op.ofm.quantization.scale_f32
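            # np.finfo(np.float32).tiny is the smallest positive normal float32 (~1.18e-38);
            # any scale below it is treated as underflowing float32 precision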
            if np.any(ofm_scale < np.finfo(np.float32).tiny):
                return (
                    False,
                    f"The quantization scale of the output tensor is {ofm_scale}, "
                    + f"minimum supported is: {np.finfo(np.float32).tiny}",
                )
        # only check the scale ratio when an OFM scale was found above, to avoid an unbound ofm_scale
        if op.ifm is not None and op.ifm.is_quantized() and ofm_scale is not None:
            ifm_scale = op.ifm.quantization.scale_f32
            if np.any(np.isinf(ifm_scale / ofm_scale)):
                return (
                    False,
                    f"IFM scale divided by OFM scale is infinite, ifm_scale={ifm_scale} ofm_scale={ofm_scale}",
                )
        return True, "Op's quantization is ok"

    @staticmethod
    def constraint_matching_in_out_types(op):
        "IFM and OFM data types must match"
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        valid = ifm_dtype == ofm_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_beta_value_range(op):
        "Beta value needs to be non-negative"
        beta = op.attrs.get("beta", 1.0)
        valid = beta >= 0
        return valid, f"Op has beta={beta}"

    @staticmethod
    def constraint_filter_type(op):
        "Kernel filter values for both width and height must be integer types"
        w = op.kernel.width
        h = op.kernel.height
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has kernel filter WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_matching_shapes(op):
        "IFM and OFM shapes must match"
        ifm_shape = op.ifm.shape
        ofm_shape = op.ofm.shape
        valid = ifm_shape == ofm_shape
        return valid, f"Op has ifm_shape={ifm_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_split_axis(op):
        "Axis value must be in the range [-RANK(IFM) to +RANK(IFM))"
        axis_tens = op.inputs[0]
        input_tens = op.inputs[1]
        dims = len(input_tens.shape)
        # handle axis being a scalar or 1-D array
        if axis_tens.values.ndim == 0:
            axis = int(axis_tens.values)
        else:
            axis = int(axis_tens.values[0])
        axis += dims if axis < 0 else 0
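        # e.g. for a 4D IFM (dims=4), axis=-1 normalises to 3 and the accepted range below is [0, 4)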
        valid = 0 <= axis < dims
        return valid, f"Op has ifm_dimensions={dims} and axis value is: {axis}"

    @staticmethod
    def constraint_split_num_splits(op):
        "The dimension along the axis must be divisible by the number of splits"
        num_splits = op.attrs.get("num_splits")
        axis_tens = op.inputs[0]
        input_tens = op.inputs[1]
        dims = len(input_tens.shape)
        # handle axis being a scalar or 1-D array
        if axis_tens.values.ndim == 0:
            axis = int(axis_tens.values)
        else:
            axis = int(axis_tens.values[0])
        axis += dims if axis < 0 else 0
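        # e.g. a dimension of size 6 along the split axis divides evenly for num_splits in {1, 2, 3, 6};
        # num_splits=4 would fail the check below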
        valid = input_tens.shape[axis] % num_splits == 0
        return valid, f"Op has ifm shape={input_tens.shape} axis={axis} num_splits={num_splits}"

    @staticmethod
    def constraint_splitv_inferred(op):
        "Only one size is allowed to be inferred"
        sizes = op.inputs[1].values
        valid = np.count_nonzero(sizes == -1) <= 1
        return valid, f"Op has multiple inferred sizes (-1): {sizes}"

    @staticmethod
    def constraint_axis_exists(op):
        "Axis attribute must exist"
        axis = op.attrs.get("axis")
        valid = axis is not None
        return valid, f"Op has axis={axis}"

    @staticmethod
    def constraint_axis_valid(op):
        "Axis attribute must be in the range [0, <ofm_dimensions>)"
        dims = len(op.ofm.shape)
        axis = op.attrs["axis"]
        axis += dims if axis < 0 else 0
        valid = 0 <= axis < dims
        return valid, f"Op has ofm_dimensions={dims} and axis attribute is: {axis}"

    @staticmethod
    def constraint_matching_dimensionality(op):
        "All Input dimensionalities must match OFM dimensionality"
        valid = True
        extra = []
        ofm_dim = len(op.ofm.shape)
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            dim = len(tens.shape)
            if dim != ofm_dim:
                valid = False
                extra.append(f"Tensor '{tens.name}' has dimension: {dim}")
        extra = ", ".join(extra)
        return valid, f"Op has ofm_dimension={ofm_dim} and the list of mismatching inputs is: {extra}"

    @staticmethod
    def constraint_valid_dimensions(op):
        "All Input dimensions must match OFM dimension in all axes except the one defined by the axis attribute"
        valid = True
        extra = []
        ofm_shape = op.ofm.shape
        ofm_dim = len(ofm_shape)
        axis = op.attrs["axis"]
        axis += ofm_dim if axis < 0 else 0
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if any(tens.shape[dim] != ofm_shape[dim] for dim in range(ofm_dim) if dim != axis):
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        extra = ", ".join(extra)
        return valid, f"Op has axis={axis}, ofm_shape={ofm_shape} and the list of mismatching inputs is: {extra}"

    @staticmethod
    def constraint_valid_dimensions_axis(op):
        """The size of the OFM axis must match the sum of the IFM sizes along the axis defined by the axis attribute"""
        valid = True
        extra = []
        ofm_shape = op.ofm.shape
        ofm_dim = len(ofm_shape)
        axis = op.attrs["axis"]
        axis += ofm_dim if axis < 0 else 0

        sum_ifm_axis = 0
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            sum_ifm_axis += tens.shape[axis]
            extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")

        valid = sum_ifm_axis == ofm_shape[axis]
        extra = ", ".join(extra)
        return valid, f"Op has axis={axis}, ofm_shape={ofm_shape} and the input shapes are: {extra}"

    @staticmethod
    def constraint_stridedslice_input_count(op):
        "Exactly 4 Input tensors are required"
        inputs = len(op.inputs)
        valid = inputs == 4
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_pad_input_count(op):
        "Number of input tensors must be exactly 2"
        inputs = len(op.inputs)
        valid = inputs == 2
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_pad_constant(op):
        "The padding tensor must be constant"
        pad_tensor = op.inputs[1].values
        valid = pad_tensor is not None
        return valid, f"Op has non-constant padding tensor: {op.inputs[1].values}"

    @staticmethod
    def constraint_pad_output_shape(op):
        "Shape of output tensor must equal the shape of the input tensor plus padding"
        input_shape = op.inputs[0].shape
        expected_output_shape = op.outputs[0].shape
        pad_tensor = op.inputs[1].values
        actual_output_shape = input_shape + pad_tensor.T[0] + pad_tensor.T[1]
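        # the TFLite paddings input has shape (rank, 2) holding [before, after] per dimension, so the
        # transposed rows used above are the per-dimension before- and after-padding columns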
        valid = np.array_equal(actual_output_shape, expected_output_shape)
        return valid, f"Op has output tensor shape: {expected_output_shape}, but input shape plus padding gives: {actual_output_shape}"

    @staticmethod
    def constraint_stridedslice_inputs_const(op):
        "Begin, End and Stride Input tensors must be constant"
        valid = True
        extra = []
        _, begin, end, strides = op.inputs
        if begin.values is None:
            valid = False
            extra.append(f"Begin tensor '{begin.name}'")
        if end.values is None:
            valid = False
            extra.append(f"End tensor '{end.name}'")
        if strides.values is None:
            valid = False
            extra.append(f"Stride tensor '{strides.name}'")
        extra = ", ".join(extra)
        return valid, f"Op has non-constant tensors: {extra}"

    @staticmethod
    def constraint_ellipsis_mask(op):
        "ellipsis_mask must be 0"
        ellipsis = op.attrs["ellipsis_mask"]
        valid = ellipsis == 0
        return valid, f"Op has ellipsis mask as: {ellipsis}"

    @staticmethod
    def constraint_axis_masks(op):
        "new_axis_mask and shrink_axis_mask cannot both be set"
        new_axis = op.attrs["new_axis_mask"]
        shrink_axis = op.attrs["shrink_axis_mask"]
        valid = (new_axis == 0) or (shrink_axis == 0)
        return valid, f"Op has new_axis_mask={new_axis} and shrink_axis_mask={shrink_axis}"

    @staticmethod
    def _get_slice_offsets(input_shape, offset_tens, offset_mask, is_begin=True):
        # For strided slice operator: get start or end offsets
        # input_shape: List[int], offset_tens: Tensor, offset_mask: int, is_begin: bool = True
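        # Worked example (hypothetical values): input_shape=[1, 8, 8, 16], offset_tens.values=[0, 2, -2, 0],
        # offset_mask=0b1001, is_begin=True -> bits 0 and 3 are set, so those indices keep the default begin
        # offset of 0; index 1 takes the value 2 and index 2 wraps -2 to 8 - 2 = 6, giving [0, 2, 6, 0]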
        offsets = len(input_shape) * [0] if is_begin else input_shape[:]
        for idx in range(len(input_shape)):
            # If the i:th bit in the mask is not set then the value in offset_tens[i] should be used, otherwise it
            # should be ignored
            if (offset_mask & (1 << idx)) == 0:
                offsets[idx] = offset_tens.values[idx]
                if offsets[idx] < 0:
                    # Convert negative indexing to positive ones
                    offsets[idx] += input_shape[idx]
        return offsets

    @staticmethod
    def constraint_slice_ranges(op):
        "Slice 'end' values must be greater than 'begin' values"
        ifm, begin, end, _ = op.inputs
        shrink_axis_mask = op.attrs["shrink_axis_mask"]
        # Calculate offset begin/end
        offset_begin = TFLiteSemantic._get_slice_offsets(ifm.shape, begin, op.attrs["begin_mask"], is_begin=True)
        offset_end = TFLiteSemantic._get_slice_offsets(ifm.shape, end, op.attrs["end_mask"], is_begin=False)
        # Check "end - begin" doesn't result in any zero or negative elements
        valid = True
        # if a shrink mask bit is set then the end position provided by the operation should be ignored, and instead a
        # new end position should be calculated so that calculations in the graph optimiser, such as (end - start),
        # result in the correct value. otherwise, we just need to check that the begin and end values are valid
        for i in range(len(ifm.shape)):
            if (shrink_axis_mask & (1 << i)) != 0:
                offset_end[i] = offset_begin[i] + 1
            else:
                if offset_end[i] <= offset_begin[i]:
                    valid = False

        op.attrs["offset_begin"] = offset_begin
        op.attrs["offset_end"] = offset_end
        return valid, f"Op has begin_values={begin.values} and end_values={end.values}"

    @staticmethod
    def constraint_matching_inputs_types(op):
        "Both Input data types must match"
        ifm_dtype = op.ifm.dtype
        ifm2_dtype = op.ifm2.dtype
        valid = ifm_dtype == ifm2_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ifm2_dtype={ifm2_dtype}"

    @staticmethod
    def constraint_matching_signed(op):
        "For IFMs that are signed, OFM must also be signed"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        if ifm_dtype.type & BaseType.Signed:
            valid = bool(ofm_dtype.type & BaseType.Signed)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_unsigned_valid(op):
        "For IFMs that are unsigned, OFM must either be the same type or int32"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        if ifm_dtype.type & BaseType.Unsigned:
            valid = (ifm_dtype == ofm_dtype) or (ofm_dtype == DataType.int32)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_input_signed(op):
        "IFM must be int8 or int16"
        ifm_dtype = op.ifm.dtype
        valid = (ifm_dtype == DataType.int8) or (ifm_dtype == DataType.int16)
        return valid, f"Op has ifm_dtype={ifm_dtype}"

    @staticmethod
    def constraint_input_8bit(op):
        "IFM must be int8 or uint8"
        ifm_dtype = op.ifm.dtype
        valid = (ifm_dtype == DataType.int8) or (ifm_dtype == DataType.uint8)
        return valid, f"Op has ifm_dtype={ifm_dtype}"

    @staticmethod
    def constraint_argmax_output(op):
        "OFM must be int32 or int64"
        ofm_dtype = op.ofm.dtype
        valid = ofm_dtype in (DataType.int32, DataType.int64)
        return valid, f"Op has ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_matching_either_shapes(op):
        "At least one Input's shape must match the OFM's shape"
        ifm_shape = op.ifm.shape
        ifm2_shape = op.ifm2.shape if op.ifm2 else None
        ofm_shape = op.ofm.shape
        valid = (ifm_shape == ofm_shape) or (ifm2_shape == ofm_shape)
        return valid, f"Op has ifm_shape={ifm_shape}, ifm2_shape={ifm2_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_keep_dim_ifm_ofm(op):
        "The IFM and OFM must have the same number of dimensions if keep_num_dims is set to true"
        valid = True
        if op.attrs.get("keep_num_dims"):
            valid = len(op.ifm.shape) == len(op.ofm.shape)
        return valid, f"Op has ifm shape={op.ifm.shape} and ofm shape={op.ofm.shape}"

    @staticmethod
    def constraint_mean_input_dims(op):
        "Input tensor must be at least 2D and at most 4D"
        dims = len(op.inputs[0].shape)
        return 2 <= dims <= 4, f"Input is {dims}D"

    @staticmethod
    def constraint_mean_axis(op):
        """Requirements for axis parameter:
        When IFM tensor is 2D:
          - Reduction in both axes is supported.
        When IFM tensor is 3D or 4D:
          - Reduction in Batch axis is only supported if batch size is 1.
          - Reduction in both Height and Width axes is supported.
          - Reduction in Depth axis is supported if at least one of H,W,C is of size 1."""
        input_shape = op.inputs[0].shape
        dims = len(input_shape)
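        # the axis input may be a scalar or a 1-D tensor; normalise it to a list of ints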
        if op.inputs[1].shape == []:
            axis = [int(op.inputs[1].values)]
        else:
            axis = list(op.inputs[1].values)
        valid = True

        for ax in axis:
            if ax < 0 or ax >= dims:
                return False, f"Axis parameter is out of bounds. axis: {axis}, dims: {dims}."

            # Batch is only supported if batch shape is 1
            if dims == 4 and ax == 0:
                if input_shape[0] != 1:
                    valid = False
                    break

            # Depth is supported if any of h,w,c == 1
            if dims == 3:
                if ax == 2 and not any([s == 1 for s in input_shape]):
                    valid = False
                    break

            # Depth is supported if any of h,w,c == 1
            if dims == 4:
                if ax == 3 and not any([s == 1 for s in input_shape[1:]]):
                    valid = False
                    break

        return valid, f"Shape is {input_shape}, Axis is {axis}."

    @staticmethod
    def constraint_matching_in_out_quant(op):
        "Input and output quantisation must match."
        if not check_quantized_tens_scaling_equal(op.ifm, op.ofm):
            return False, "IFM and OFM quantisation parameters are not equal."
        return True, "IFM and OFM quantisation parameters match."

    @staticmethod
    def constraint_matching_in_out_elements(op):
        "Input and output number of elements must match."
        if shape_num_elements(op.ifm.shape) != shape_num_elements(op.ofm.shape):
            return False, f"IFM {op.ifm.shape} and OFM {op.ofm.shape} number of elements are not equal."
        return True, "IFM and OFM number of elements are equal."

    @staticmethod
    def constraint_lstm_dimensions(op):
        "IFM and OFM must have 3D shape"
        valid = len(op.ifm.shape) == len(op.ofm.shape) == 3
        return valid, f"Op has ifm shape {op.ifm.shape} and ofm shape {op.ofm.shape}"

    @staticmethod
    def constraint_lstm_inputs(op):
        "Must have 24 input tensors"
        n_inputs = len(op.inputs)
        return n_inputs == 24, f"Op has {n_inputs} inputs"

    @staticmethod
    def constraint_lstm_intermediates(op):
        "Must have 5 intermediate tensors"
        n_intermediates = len(op.intermediates)
        return n_intermediates == 5, f"Op has {n_intermediates} intermediates"

    @staticmethod
    def constraint_lstm_variables(op):
        "State tensors must be variable"
        valid = True
        extra = []
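        # inputs 18 and 19 hold the recurrent output state and cell state tensors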
        for tens in op.inputs[18:20]:
            if not tens.is_variable:
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has non-variable state tensor(s): {extra}"

    @staticmethod
    def constraint_transpose_permutation_size(op):
        "Permutation array must be a 1D tensor with RANK(IFM) elements"
        dims = len(op.inputs[0].shape)
        perm = op.inputs[1]
        valid = len(perm.shape) == 1 and perm.shape[0] == dims
        return valid, f"Op has ifm_dimension={dims} and permutation shape {perm.shape}"

    @staticmethod
    def constraint_transpose_permutation_values(op):
        "Permutation array must have constant values in the range [0, RANK(IFM))"
        dims = len(op.inputs[0].shape)
        perm = op.inputs[1]
        valid = False
        if perm.values is not None:
            valid = not any([val < 0 or val >= dims for val in perm.values])
        return valid, f"Op has ifm_dimension={dims} and permutation values are: {perm.values}"


def tflite_semantic_checker(nng):
    semantic_checker = TFLiteSemantic()
    for sg in nng.subgraphs:
        for op in sg.get_all_ops():
            op.run_on_npu = semantic_checker.is_operator_semantic_valid(op)
    return nng
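

# A minimal usage sketch (an assumption for illustration: `nng` is the internal graph representation
# produced by this codebase's TFLite reader; only names defined or used in this module appear below):
#
#     nng = tflite_semantic_checker(nng)
#     cpu_ops = [op for sg in nng.subgraphs for op in sg.get_all_ops() if not op.run_on_npu]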