# Copyright (C) 2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The TFLiteSemantic class, which is a collection of TensorFlow Lite model semantic checks.
from collections import defaultdict

import numpy as np

from .data_type import BaseType
from .data_type import DataType
from .numeric_util import is_integer
from .operation import get_slice_offsets
from .operation import Op
from .supported_operators_util import docstring_format_args
from .supported_operators_util import list_formatter
from .tensor import check_quantized_tens_scaling_equal
from .tflite_mapping import BUILTIN_OPERATOR_UNKNOWN
from .tflite_mapping import optype_to_builtintype


def _optype_formatter(op_list):
    # Convert internal op types to external names
    output = map(optype_to_builtintype, op_list)
    # Remove UNKNOWNs
    output = (x for x in output if x is not BUILTIN_OPERATOR_UNKNOWN)
    return list_formatter(output)


class TFLiteSemantic:
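    """Collection of TensorFlow Lite model semantic checks.

    Each check is a plain callable that takes an op and returns a
    (valid, extra) tuple; the callable's docstring doubles as the
    user-facing message printed when the check fails.
    """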
    # Categorised lists of operators
    convolution_ops = set(
        (
            Op.Conv2DBias,
            Op.Conv2D,
            Op.QuantizedConv2D,
        )
    )
    depthwise_convolution_ops = set((Op.DepthwiseConv2DBias,))
    transpose_convolution_ops = set((Op.Conv2DBackpropInput,))
    convolution_like_ops = convolution_ops | depthwise_convolution_ops | transpose_convolution_ops
    max_pooling_ops = Op.op_set(Op.is_maxpool_op)
    avg_pooling_ops = Op.op_set(Op.is_avgpool_op)
    pooling_ops = set((Op.ReduceSum,)) | max_pooling_ops | avg_pooling_ops
    unary_elem_wise_main_ops = Op.op_set(Op.is_unary_elementwise_op)
    binary_elem_wise_min_max_ops = set(
        (
            Op.Minimum,
            Op.Maximum,
        )
    )
    binary_elem_wise_shift_ops = set(
        (
            Op.SHL,
            Op.SHR,
        )
    )
    binary_elem_wise_add_mul_sub = set(
        (
            Op.Add,
            Op.Mul,
            Op.Sub,
        )
    )
    binary_elem_wise_main_ops = binary_elem_wise_min_max_ops | binary_elem_wise_add_mul_sub | binary_elem_wise_shift_ops
    elem_wise_main_ops = binary_elem_wise_main_ops | unary_elem_wise_main_ops
    shapeless_input_ops = binary_elem_wise_main_ops | set((Op.Split, Op.SplitV, Op.Mean, Op.ExpandDims, Op.Quantize))
    reshape_ops = set(
        (
            Op.Reshape,
            Op.QuantizedReshape,
            Op.Squeeze,
            Op.ExpandDims,
        )
    )

    def __init__(self):
        # Setup the generic constraints. Note: the order matters
        self.generic_constraints = []
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_no_dynamic)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_defined_shape)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_output_scalar)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_input_scalar)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_shape_size)

        self.generic_constraints.append(TFLiteSemantic.constraint_tens_quant_none_check)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_quant_scale)
        self.generic_constraints.append(TFLiteSemantic.constraint_quant_scale_inf)
        self.generic_constraints.append(TFLiteSemantic.constraint_none_const_tensors)
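        # Ordering note (assumption): the shape/None-value checks above are kept
        # ahead of the quantization checks so that later constraints can rely on
        # structurally well-formed tensors.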

        # Setup specific constraints. Note: the order matters
        self.specific_constraints = defaultdict(list)

        # Conv-like checks:
        for op_type in TFLiteSemantic.convolution_like_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_stride_type)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_dilation_type)

        # Pooling checks:
        for op_type in TFLiteSemantic.pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_stride_type)
        # AVG pooling specific checks:
        for op_type in TFLiteSemantic.avg_pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_filter_type)
        # MAX pooling specific checks:
        for op_type in TFLiteSemantic.max_pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_filter_type)

        # Concat specific checks:
        for op_type in (Op.Concat, Op.ConcatTFLite):
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_axis_exists)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_axis_valid)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_dimensionality)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_valid_dimensions)

        # Element-wise checks:
        for op_type in TFLiteSemantic.elem_wise_main_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_either_shapes)
        # Unary specific checks:
        for op_type in TFLiteSemantic.unary_elem_wise_main_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Binary Min/Max specific checks:
        for op_type in TFLiteSemantic.binary_elem_wise_min_max_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Binary Add/Mul/Sub specific checks:
        for op_type in TFLiteSemantic.binary_elem_wise_add_mul_sub:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_inputs_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_signed)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_unsigned_valid)

        # Ops reshaping dimensions: Reshape, QuantizedReshape, Squeeze and ExpandDims
        for op_type in TFLiteSemantic.reshape_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_quant)

        # Softmax specific checks:
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_matching_shapes)
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_matching_in_out_types)
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_beta_value_range)

        # SplitV specific checks:
        self.specific_constraints[Op.SplitV].append(TFLiteSemantic.constraint_splitv_inferred)

        # StridedSlice specific checks:
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_stridedslice_input_count)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_stridedslice_inputs_const)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_ellipsis_mask)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_axis_masks)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_slice_ranges)

        # LeakyRelu specific checks:
        self.specific_constraints[Op.LeakyRelu].append(TFLiteSemantic.constraint_alpha_valid)

        # FullyConnected specific checks:
        self.specific_constraints[Op.FullyConnected].append(TFLiteSemantic.constraint_fc_output_2d)
        self.specific_constraints[Op.FullyConnected].append(TFLiteSemantic.constraint_keep_dim_ifm_ofm)

        # Pad specific checks:
        self.specific_constraints[Op.Pad].append(TFLiteSemantic.constraint_pad_input_count)
        self.specific_constraints[Op.Pad].append(TFLiteSemantic.constraint_pad_constant)

        # HardSwish specific checks:
        self.specific_constraints[Op.HardSwish].append(TFLiteSemantic.constraint_input_8bit)
        self.specific_constraints[Op.HardSwish].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Mean specific checks:
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_input_8bit)
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_mean_input_dims)
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_mean_axis)

    def is_operator_semantic_valid(self, op):
        ext_type = optype_to_builtintype(op.type)

        if op.type in (Op.Placeholder, Op.SubgraphInput, Op.Const):
            return True

        # Filter the generic constraints, excluding any that do not apply to this op.type
        filtered_generic_constraints = []

        for constraint in self.generic_constraints:
            # Skip constraints that appear in the exclude list for this op type (default: none)
            if constraint not in self.get_generic_constraint_exclude_list().get(op.type, []):
                filtered_generic_constraints.append(constraint)

        for constraint in filtered_generic_constraints + self.specific_constraints[op.type]:
            valid, extra = constraint(op)
            if not valid:
                print(
                    f"Warning: Unsupported TensorFlow Lite semantics for {ext_type} '{op.name}'. Placing on CPU instead"
                )
                print(f" - {constraint.__doc__}")
                if extra:
                    print(f"   {extra}")
                return False

        return True

    @staticmethod
    def get_generic_constraint_exclude_list():

        # Not all generic constraints can be applied to each operator
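        # (Assumption) Op.Shape outputs an integer shape tensor that carries no
        # quantization parameters, and Op.Quantize legitimately sees scalar or
        # not-yet-defined shapes, hence the exclusions below.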
        generic_constraints_exclude_list = {
            Op.Shape: [
                TFLiteSemantic.constraint_tens_quant_none_check,
            ],
            Op.Quantize: [
                TFLiteSemantic.constraint_tens_no_dynamic,
                TFLiteSemantic.constraint_tens_output_scalar,
            ],
        }
        return generic_constraints_exclude_list

    @staticmethod
    def constraint_none_const_tensors(op):
        "Constant tensors should not have NoneType-values"
        valid = True
        extra = ""
        for tens in filter(None, op.inputs):
            if len(tens.ops) > 0 and tens.ops[0].type == Op.Const and tens.values is None:
                valid = False
                extra = str(tens.name)
        return valid, f"Unexpected None value for constant tensor: {extra}"

236 @staticmethod
Jonas Ohlsson45e653d2021-07-26 16:13:12 +0200237 def constraint_tens_no_dynamic(op):
238 "Input(s) and Output tensors must not be dynamic"
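        # A tensor counts as dynamic when it has no shape and no constant values,
        # i.e. its contents and shape are only known at run time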
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (tens.values is None):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has dynamic tensor(s): {extra}"

    @staticmethod
    def constraint_tens_defined_shape(op):
        "Input(s) and Output tensors must have a defined shape"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if not tens.has_fully_defined_shape():
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_output_scalar(op):
        "Output tensors cannot be scalar"
        ofm = op.ofm
        valid = ofm.shape != []
        return valid, f"Output Tensor '{ofm.name}' is scalar"

    @classmethod
    @docstring_format_args([_optype_formatter(shapeless_input_ops)])
    def constraint_tens_input_scalar(cls, op):
        "Scalar Input tensors are only valid for op type: {}"
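        # The docstring_format_args decorator substitutes the "{}" above with the
        # formatted list of shapeless_input_ops, so the failure message names the
        # op types that do accept scalar inputs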
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (op.type not in cls.shapeless_input_ops):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has scalar input tensor(s): {extra}"

    @staticmethod
    def constraint_tens_shape_size(op):
        "Input(s) and Output tensors must not be greater than 4D"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if len(tens.shape) > 4:
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_quant_none_check(op):
        "Input(s), Output and Weight tensors must have quantization parameters"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if tens.quantization is None:
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has tensors with missing quantization parameters: {extra}"

    @staticmethod
    def constraint_tens_quant_scale(op):
        "Input(s), Output and Weight tensors with quantization scales must be finite"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if (
                tens.quantization
                and tens.quantization.scale_f32 is not None
                and np.isinf(tens.quantization.scale_f32).any()
            ):
                valid = False
                extra.append(f"Tensor '{tens.name}' has quantization scale: {tens.quantization.scale_f32}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_fc_output_2d(op):
        """The output tensor(s) must have 2D shape"""
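        # (Assumption) op.weights.shape[-2] is the input-feature depth of the FC
        # weights; the check passes only if the IFM can be folded into a matching
        # 2D shape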
        valid = op.ifm.get_shape_as_2d(op.weights.shape[-2]) is not None
        extra = f"Op has non-2D output tensor '{op.ofm.name}'" if not valid else ""

        return valid, extra

    @staticmethod
    def constraint_stride_type(op):
        "Stride values for both width and height must be integer types"
        w, h = op.get_kernel_stride()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has stride WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_dilation_type(op):
        "Dilation factor values for both width and height must be integer types"
        w, h = op.get_kernel_dilation()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has dilation factor WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_quant_scale_inf(op):
        "Input and Output tensors must have quantization scales that fit within float32 precision"
        if op.ofm is not None and op.ofm.is_quantized():
            ofm_scale = op.ofm.quantization.scale_f32
            if np.any(ofm_scale < np.finfo(np.float32).tiny):
                return (
                    False,
                    f"The quantization scale of the output tensor is {ofm_scale}, "
                    + f"minimum supported is: {np.finfo(np.float32).tiny}",
                )
            # The IFM/OFM ratio check is nested here so that ofm_scale is always defined
            if op.ifm is not None and op.ifm.is_quantized():
                ifm_scale = op.ifm.quantization.scale_f32
                if np.any(np.isinf(ifm_scale / ofm_scale)):
                    return (
                        False,
                        f"IFM scale divided by OFM scale is infinite, ifm_scale={ifm_scale} ofm_scale={ofm_scale}",
                    )
        return True, "Op's quantization is ok"

    @staticmethod
    def constraint_matching_in_out_types(op):
        "IFM and OFM data types must match"
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        valid = ifm_dtype == ofm_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_beta_value_range(op):
        "Beta value needs to be non-negative"
        beta = op.attrs.get("beta", 1.0)
        valid = beta >= 0
        return valid, f"Op has beta={beta}"

    @staticmethod
    def constraint_filter_type(op):
        "Kernel filter values for both width and height must be integer types"
        w = op.kernel.width
        h = op.kernel.height
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has kernel filter WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_matching_shapes(op):
        "IFM and OFM shapes must match"
        ifm_shape = op.ifm.shape
        ofm_shape = op.ofm.shape
        valid = ifm_shape == ofm_shape
        return valid, f"Op has ifm_shape={ifm_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_splitv_inferred(op):
        "Only one size is allowed to be inferred"
        sizes = op.inputs[1].values
        valid = np.count_nonzero(sizes == -1) <= 1
        return valid, f"Op has multiple inferred sizes (-1): {sizes}"

    @staticmethod
    def constraint_axis_exists(op):
        "Axis attribute must exist"
        axis = op.attrs.get("axis")
        valid = axis is not None
        return valid, f"Op has axis={axis}"

    @staticmethod
    def constraint_axis_valid(op):
        "Axis attribute must be in the range [0, <ofm_dimensions>)"
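        # Negative axis values count from the last dimension, so normalise the
        # axis before range-checking it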
        dims = len(op.ofm.shape)
        axis = op.attrs["axis"]
        axis += dims if axis < 0 else 0
        valid = 0 <= axis < dims
        return valid, f"Op has ofm_dimensions={dims} and axis attribute is: {axis}"

    @staticmethod
    def constraint_matching_dimensionality(op):
        "All Input dimensionalities must match OFM dimensionality"
        valid = True
        extra = []
        ofm_dim = len(op.ofm.shape)
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            dim = len(tens.shape)
            if dim != ofm_dim:
                valid = False
                extra.append(f"Tensor '{tens.name}' has dimension: {dim}")
        extra = ", ".join(extra)
        return valid, f"Op has ofm_dimension={ofm_dim} and the list of mismatching inputs is: {extra}"

    @staticmethod
    def constraint_valid_dimensions(op):
        "All Input dimensions must match OFM dimension in all axes except the one defined by the axis attribute"
        valid = True
        extra = []
        ofm_shape = op.ofm.shape
        ofm_dim = len(ofm_shape)
        axis = op.attrs["axis"]
        axis += ofm_dim if axis < 0 else 0
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if any(tens.shape[dim] != ofm_shape[dim] for dim in range(ofm_dim) if dim != axis):
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        extra = ", ".join(extra)
        return valid, f"Op has axis={axis}, ofm_shape={ofm_shape} and the list of mismatching inputs is: {extra}"

    @staticmethod
    def constraint_stridedslice_input_count(op):
        "Exactly 4 Input tensors are required"
        inputs = len(op.inputs)
        valid = inputs == 4
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_pad_input_count(op):
        "Number of input tensors must be exactly 2"
        inputs = len(op.inputs)
        valid = inputs == 2
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_pad_constant(op):
        "The padding tensor must be constant"
        pad_tensor = op.inputs[1].values
        valid = pad_tensor is not None
        return valid, f"Op has non-constant padding tensor: {op.inputs[1].values}"

    @staticmethod
    def constraint_stridedslice_inputs_const(op):
        "Begin, End and Stride Input tensors must be constant"
        valid = True
        extra = []
        _, begin, end, strides = op.inputs
        if begin.values is None:
            valid = False
            extra.append(f"Begin tensor '{begin.name}'")
        if end.values is None:
            valid = False
            extra.append(f"End tensor '{end.name}'")
        if strides.values is None:
            valid = False
            extra.append(f"Stride tensor '{strides.name}'")
        extra = ", ".join(extra)
        return valid, f"Op has non-constant tensors: {extra}"

    @staticmethod
    def constraint_ellipsis_mask(op):
        "ellipsis_mask must be 0"
        ellipsis = op.attrs["ellipsis_mask"]
        valid = ellipsis == 0
        return valid, f"Op has ellipsis mask as: {ellipsis}"

    @staticmethod
    def constraint_axis_masks(op):
        "new_axis_mask and shrink_axis_mask cannot both be set"
        new_axis = op.attrs["new_axis_mask"]
        shrink_axis = op.attrs["shrink_axis_mask"]
        valid = (new_axis == 0) or (shrink_axis == 0)
        return valid, f"Op has new_axis_mask={new_axis} and shrink_axis_mask={shrink_axis}"

    @staticmethod
    def constraint_slice_ranges(op):
        "Slice 'end' values must be greater than 'begin' values"
        ifm, begin, end, _ = op.inputs
        # Calculate offset begin/end
        offset_begin = get_slice_offsets(ifm.shape, begin, op.attrs["begin_mask"], is_begin=True)
        offset_end = get_slice_offsets(ifm.shape, end, op.attrs["end_mask"], is_begin=False)
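        # (Assumption, following TFLite StridedSlice semantics) a set bit in
        # begin_mask/end_mask tells get_slice_offsets to ignore the corresponding
        # begin/end value and use the fullest possible range for that axis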
        # Check "end - begin" doesn't result in any zero or negative elements
        valid = all((e - b) > 0 for b, e in zip(offset_begin, offset_end))
        return valid, f"Op has begin_values={begin.values} and end_values={end.values}"

    @staticmethod
    def constraint_matching_inputs_types(op):
        "Both Input data types must match"
        ifm_dtype = op.ifm.dtype
        ifm2_dtype = op.ifm2.dtype
        valid = ifm_dtype == ifm2_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ifm2_dtype={ifm2_dtype}"

    @staticmethod
    def constraint_matching_signed(op):
        "If the IFM is signed, the OFM must also be signed"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        if ifm_dtype.type & BaseType.Signed:
            valid = bool(ofm_dtype.type & BaseType.Signed)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_unsigned_valid(op):
        "If the IFM is unsigned, the OFM must either be the same type or int32"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        if ifm_dtype.type & BaseType.Unsigned:
            valid = (ifm_dtype == ofm_dtype) or (ofm_dtype == DataType.int32)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_input_8bit(op):
        "IFM must be int8 or uint8"
        ifm_dtype = op.ifm.dtype
        valid = (ifm_dtype == DataType.int8) or (ifm_dtype == DataType.uint8)
        return valid, f"Op has ifm_dtype={ifm_dtype}"

    @staticmethod
    def constraint_matching_either_shapes(op):
        "At least one Input's shape must match the OFM's shape"
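        # (Assumption) the non-matching input, if any, holds a broadcastable
        # shape; only one input is required to match the OFM exactly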
        ifm_shape = op.ifm.shape
        ifm2_shape = op.ifm2.shape if op.ifm2 else None
        ofm_shape = op.ofm.shape
        valid = (ifm_shape == ofm_shape) or (ifm2_shape == ofm_shape)
        return valid, f"Op has ifm_shape={ifm_shape}, ifm2_shape={ifm2_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_alpha_valid(op):
        "Alpha is only allowed to be negative if IFM is int8 or uint8"
        alpha = op.attrs["alpha"]
        ifm_dtype = op.ifm.dtype
        valid = ifm_dtype == DataType.int8 or ifm_dtype == DataType.uint8 or alpha >= 0
        return valid, f"Op has alpha={alpha} and ifm_dtype={ifm_dtype}"

    @staticmethod
    def constraint_keep_dim_ifm_ofm(op):
        "The IFM and OFM must have the same number of dimensions if keep_num_dims is set to true"
        valid = True
        if op.attrs.get("keep_num_dims"):
            valid = len(op.ifm.shape) == len(op.ofm.shape)
        return valid, f"Op has ifm shape={op.ifm.shape} and ofm shape={op.ofm.shape}"

    @staticmethod
    def constraint_mean_input_dims(op):
        "Input tensor must be at least 2D"
        dims = len(op.inputs[0].shape)
        return 2 <= dims <= 4, f"Input is {dims}D"

    @staticmethod
    def constraint_mean_axis(op):
        "Axis indices must correspond to height and width axes"
        dims = len(op.inputs[0].shape)
        axis = int(op.inputs[1].values) if op.inputs[1].shape == [] else list(op.inputs[1].values)
        if dims in (2, 3):
            valid = axis in (0, 1, [0], [1], [0, 1], [1, 0])
        elif dims == 4:
            valid = axis in (1, 2, [1], [2], [1, 2], [2, 1])
        else:
            # Guarded by constraint_mean_input_dims, but avoid an unbound local
            # if the input rank is ever out of range
            valid = False
        return valid, f"Axis is {axis}"

    @staticmethod
    def constraint_matching_in_out_quant(op):
        "Input and output quantisation must match."
        if not check_quantized_tens_scaling_equal(op.ifm, op.ofm):
            return False, "IFM and OFM quantisation parameters are not equal."
        return True, "IFM and OFM quantisation parameters match."


def tflite_semantic_checker(nng):
    semantic_checker = TFLiteSemantic()
    for sg in nng.subgraphs:
        for op in sg.get_all_ops():
            op.run_on_npu = semantic_checker.is_operator_semantic_valid(op)
    return nng
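

# Usage sketch (assumes `nng` is a network graph produced elsewhere in this
# package, e.g. by the TFLite reader):
#
#     nng = tflite_semantic_checker(nng)
#     # Ops that failed a semantic check now have op.run_on_npu == False and
#     # are left to run on the CPU.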