# Copyright (C) 2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The TFLiteSemantic class, a collection of TensorFlow Lite model semantic checks.
from collections import defaultdict

import numpy as np

from .data_type import BaseType
from .data_type import DataType
from .numeric_util import is_integer
from .operation import get_slice_offsets
from .operation import Op
from .supported_operators_util import docstring_format_args
from .supported_operators_util import list_formatter
from .tensor import check_quantized_tens_scaling_equal
from .tflite_mapping import BUILTIN_OPERATOR_UNKNOWN
from .tflite_mapping import optype_to_builtintype


def _optype_formatter(op_list):
    # Convert internal op types to external names
    output = map(optype_to_builtintype, op_list)
    # Remove UNKNOWNs
    output = (x for x in output if x is not BUILTIN_OPERATOR_UNKNOWN)
    return list_formatter(output)
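
# A minimal usage sketch (illustrative only, not called from here):
# _optype_formatter([Op.Conv2D]) maps the internal op type to its TFLite
# builtin name via optype_to_builtintype (e.g. "CONV_2D"), drops any entries
# that map to BUILTIN_OPERATOR_UNKNOWN, and formats the rest with
# list_formatter for use in constraint docstrings.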


class TFLiteSemantic:
    # Categorised lists of operators
    convolution_ops = set(
        (
            Op.Conv2DBias,
            Op.Conv2D,
            Op.QuantizedConv2D,
        )
    )
    depthwise_convolution_ops = set((Op.DepthwiseConv2DBias,))
    transpose_convolution_ops = set((Op.Conv2DBackpropInput,))
    convolution_like_ops = convolution_ops | depthwise_convolution_ops | transpose_convolution_ops
    max_pooling_ops = Op.op_set(Op.is_maxpool_op)
    avg_pooling_ops = Op.op_set(Op.is_avgpool_op)
    pooling_ops = set((Op.ReduceSum,)) | max_pooling_ops | avg_pooling_ops
    unary_elem_wise_main_ops = Op.op_set(Op.is_unary_elementwise_op)
    binary_elem_wise_min_max_ops = set(
        (
            Op.Minimum,
            Op.Maximum,
        )
    )
    binary_elem_wise_shift_ops = set(
        (
            Op.SHL,
            Op.SHR,
        )
    )
    binary_elem_wise_add_mul_sub = set(
        (
            Op.Add,
            Op.Mul,
            Op.Sub,
        )
    )
    binary_elem_wise_main_ops = binary_elem_wise_min_max_ops | binary_elem_wise_add_mul_sub | binary_elem_wise_shift_ops
    elem_wise_main_ops = binary_elem_wise_main_ops | unary_elem_wise_main_ops
    shapeless_input_ops = binary_elem_wise_main_ops | set((Op.Split, Op.SplitV, Op.Mean, Op.ExpandDims))
    reshape_ops = set(
        (
            Op.Reshape,
            Op.QuantizedReshape,
            Op.Squeeze,
            Op.ExpandDims,
        )
    )

    def __init__(self):
        # Set up the generic constraints. Note: the order matters
        self.generic_constraints = []
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_no_dynamic)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_defined_shape)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_output_scalar)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_input_scalar)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_shape_size)

        self.generic_constraints.append(TFLiteSemantic.constraint_tens_quant_none_check)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_quant_scale)
        self.generic_constraints.append(TFLiteSemantic.constraint_quant_scale_inf)
        self.generic_constraints.append(TFLiteSemantic.constraint_none_const_tensors)

        # Set up specific constraints. Note: the order matters
        self.specific_constraints = defaultdict(list)

        # Conv-like checks:
        for op_type in TFLiteSemantic.convolution_like_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_stride_type)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_dilation_type)

        # Pooling checks:
        for op_type in TFLiteSemantic.pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_stride_type)
        # AVG pooling specific checks:
        for op_type in TFLiteSemantic.avg_pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_filter_type)
        # MAX pooling specific checks:
        for op_type in TFLiteSemantic.max_pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_filter_type)

        # Concat specific checks:
        for op_type in (Op.Concat, Op.ConcatTFLite):
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_axis_exists)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_axis_valid)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_dimensionality)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_valid_dimensions)

        # Element-wise checks:
        for op_type in TFLiteSemantic.elem_wise_main_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_either_shapes)
        # Unary specific checks:
        for op_type in TFLiteSemantic.unary_elem_wise_main_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Binary Min/Max specific checks:
        for op_type in TFLiteSemantic.binary_elem_wise_min_max_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Binary Add/Mul/Sub specific checks:
        for op_type in TFLiteSemantic.binary_elem_wise_add_mul_sub:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_inputs_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_signed)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_unsigned_valid)

        # Ops reshaping dimensions: Reshape, Squeeze and ExpandDims
        for op_type in TFLiteSemantic.reshape_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_quant)

        # Softmax specific checks:
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_matching_shapes)
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_matching_in_out_types)
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_beta_value_range)

        # SplitV specific checks:
        self.specific_constraints[Op.SplitV].append(TFLiteSemantic.constraint_splitv_inferred)

        # StridedSlice specific checks:
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_stridedslice_input_count)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_stridedslice_inputs_const)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_ellipsis_mask)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_axis_masks)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_slice_ranges)

        # LeakyRelu specific checks:
        self.specific_constraints[Op.LeakyRelu].append(TFLiteSemantic.constraint_alpha_valid)

        # FullyConnected specific checks:
        self.specific_constraints[Op.FullyConnected].append(TFLiteSemantic.constraint_fc_output_2d)
        self.specific_constraints[Op.FullyConnected].append(TFLiteSemantic.constraint_keep_dim_ifm_ofm)

        # Pad specific checks:
        self.specific_constraints[Op.Pad].append(TFLiteSemantic.constraint_pad_input_count)
        self.specific_constraints[Op.Pad].append(TFLiteSemantic.constraint_pad_constant)

        # HardSwish specific checks:
        self.specific_constraints[Op.HardSwish].append(TFLiteSemantic.constraint_input_8bit)
        self.specific_constraints[Op.HardSwish].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Mean specific checks:
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_input_8bit)
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_mean_input_dims)
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_mean_axis)

    def is_operator_semantic_valid(self, op):
        ext_type = optype_to_builtintype(op.type)

        if op.type in (Op.Placeholder, Op.SubgraphInput, Op.Const):
            return True

        for constraint in self.generic_constraints + self.specific_constraints[op.type]:
            valid, extra = constraint(op)
            if not valid:
                print(
                    f"Warning: Unsupported TensorFlow Lite semantics for {ext_type} '{op.name}'. Placing on CPU instead"
                )
                print(f" - {constraint.__doc__}")
                if extra:
                    print(f"   {extra}")
                return False

        return True
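
    # Constraint protocol (an illustrative sketch only, not registered above):
    # each check is a callable taking the op and returning (valid, extra). The
    # docstring is the user-facing rule printed on failure, and `extra` carries
    # the failing detail, e.g.:
    #
    #     @staticmethod
    #     def constraint_example(op):
    #         "Op must have a name"
    #         valid = op.name != ""
    #         return valid, f"Op has name={op.name}"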

    @staticmethod
    def constraint_none_const_tensors(op):
        "Constant tensors should not have NoneType-values"
        valid = True
        extra = ""
        for tens in filter(None, op.inputs):
            if len(tens.ops) > 0 and tens.ops[0].type == Op.Const and tens.values is None:
                valid = False
                extra = str(tens.name)
        return valid, f"Unexpected None value for constant tensor: {extra}"

    @staticmethod
    def constraint_tens_no_dynamic(op):
        "Input(s) and Output tensors must not be dynamic"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (tens.values is None):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has dynamic tensor(s): {extra}"

    @staticmethod
    def constraint_tens_defined_shape(op):
        "Input(s) and Output tensors must have a defined shape"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if not tens.has_fully_defined_shape():
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_output_scalar(op):
        "Output tensors cannot be scalar"
        ofm = op.ofm
        valid = ofm.shape != []
        return valid, f"Output Tensor '{ofm.name}' is scalar"

    @classmethod
    @docstring_format_args([_optype_formatter(shapeless_input_ops)])
    def constraint_tens_input_scalar(cls, op):
        "Scalar Input tensors are only valid for op type: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (op.type not in cls.shapeless_input_ops):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has scalar input tensor(s): {extra}"

    @staticmethod
    def constraint_tens_shape_size(op):
        "Input(s) and Output tensors must not be greater than 4D"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if len(tens.shape) > 4:
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_quant_none_check(op):
        "Input(s), Output and Weight tensors must have quantization parameters"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if tens.quantization is None:
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has tensors with missing quantization parameters: {extra}"

    @staticmethod
    def constraint_tens_quant_scale(op):
        "Input(s), Output and Weight tensors with quantization scales must be finite"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if (tens.quantization.scale_f32 is not None) and np.isinf(tens.quantization.scale_f32).any():
                valid = False
                extra.append(f"Tensor '{tens.name}' has quantization scale: {tens.quantization.scale_f32}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_fc_output_2d(op):
        "The output tensor(s) must have 2D shape"
        valid = True
        extra = []
        for tens in op.outputs:
            if len(tens.shape) != 2:
                valid = False
                extra.append(f"Tensor '{tens.name}' is {len(tens.shape)}D")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_stride_type(op):
        "Stride values for both width and height must be integer types"
        w, h = op.get_kernel_stride()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has stride WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_dilation_type(op):
        "Dilation factor values for both width and height must be integer types"
        w, h = op.get_kernel_dilation()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has dilation factor WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_quant_scale_inf(op):
        "Input and Output tensors must have quantization scales that fit within float32 precision"
        # Initialise ofm_scale up front so the IFM branch below cannot hit an
        # unbound ofm_scale when the op has no quantized OFM
        ofm_scale = None
        if op.ofm is not None and op.ofm.is_quantized():
            ofm_scale = op.ofm.quantization.scale_f32
            if np.any(ofm_scale < np.finfo(np.float32).tiny):
                return (
                    False,
                    f"The quantization scale of the output tensor is {ofm_scale}, "
                    + f"minimum supported is: {np.finfo(np.float32).tiny}",
                )
        if op.ifm is not None and op.ifm.is_quantized() and ofm_scale is not None:
            ifm_scale = op.ifm.quantization.scale_f32
            if np.any(np.isinf(ifm_scale / ofm_scale)):
                return (
                    False,
                    f"IFM scale divided by OFM scale is infinite, ifm_scale={ifm_scale} ofm_scale={ofm_scale}",
                )
        return True, "Op's quantization is ok"
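
    # For reference: np.finfo(np.float32).tiny is the smallest normalized
    # positive float32, approximately 1.1754944e-38. OFM scales below it, or an
    # IFM/OFM scale ratio that overflows to inf, are rejected above.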

    @staticmethod
    def constraint_matching_in_out_types(op):
        "IFM and OFM data types must match"
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        valid = ifm_dtype == ofm_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_beta_value_range(op):
        "Beta value needs to be non-negative"
        beta = op.attrs.get("beta", 1.0)
        valid = beta >= 0
        return valid, f"Op has beta={beta}"

    @staticmethod
    def constraint_filter_type(op):
        "Kernel filter values for both width and height must be integer types"
        w = op.kernel.width
        h = op.kernel.height
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has kernel filter WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_matching_shapes(op):
        "IFM and OFM shapes must match"
        ifm_shape = op.ifm.shape
        ofm_shape = op.ofm.shape
        valid = ifm_shape == ofm_shape
        return valid, f"Op has ifm_shape={ifm_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_splitv_inferred(op):
        "Only one size is allowed to be inferred"
        sizes = op.inputs[1].values
        valid = np.count_nonzero(sizes == -1) <= 1
        return valid, f"Op has multiple inferred sizes (-1): {sizes}"

    @staticmethod
    def constraint_axis_exists(op):
        "Axis attribute must exist"
        axis = op.attrs.get("axis")
        valid = axis is not None
        return valid, f"Op has axis={axis}"

    @staticmethod
    def constraint_axis_valid(op):
        "Axis attribute must be in the range [0, <ofm_dimensions>)"
        dims = len(op.ofm.shape)
        axis = op.attrs["axis"]
        axis += dims if axis < 0 else 0
        valid = 0 <= axis < dims
        return valid, f"Op has ofm_dimensions={dims} and axis attribute is: {axis}"
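
    # Example: with a 4D OFM and axis=-1, the normalization above (axis += dims
    # for negative axis) yields axis=3, which satisfies 0 <= axis < dims.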

    @staticmethod
    def constraint_matching_dimensionality(op):
        "All Input dimensionalities must match OFM dimensionality"
        valid = True
        extra = []
        ofm_dim = len(op.ofm.shape)
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            dim = len(tens.shape)
            if dim != ofm_dim:
                valid = False
                extra.append(f"Tensor '{tens.name}' has dimension: {dim}")
        extra = ", ".join(extra)
        return valid, f"Op has ofm_dimension={ofm_dim} and the list of mismatching inputs is: {extra}"

    @staticmethod
    def constraint_valid_dimensions(op):
        "All Input dimensions must match OFM dimension in all axes except the one defined by the axis attribute"
        valid = True
        extra = []
        ofm_shape = op.ofm.shape
        ofm_dim = len(ofm_shape)
        axis = op.attrs["axis"]
        axis += ofm_dim if axis < 0 else 0
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if any(tens.shape[dim] != ofm_shape[dim] for dim in range(ofm_dim) if dim != axis):
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        extra = ", ".join(extra)
        return valid, f"Op has axis={axis}, ofm_shape={ofm_shape} and the list of mismatching inputs is: {extra}"

    @staticmethod
    def constraint_stridedslice_input_count(op):
        "Exactly 4 Input tensors are required"
        inputs = len(op.inputs)
        valid = inputs == 4
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_pad_input_count(op):
        "Number of input tensors must be exactly 2"
        inputs = len(op.inputs)
        valid = inputs == 2
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_pad_constant(op):
        "The padding tensor must be constant"
        pad_tensor = op.inputs[1].values
        valid = pad_tensor is not None
        return valid, f"Op has non-constant padding tensor: {op.inputs[1].values}"

    @staticmethod
    def constraint_stridedslice_inputs_const(op):
        "Begin, End and Stride Input tensors must be constant"
        valid = True
        extra = []
        _, begin, end, strides = op.inputs
        if begin.values is None:
            valid = False
            extra.append(f"Begin tensor '{begin.name}'")
        if end.values is None:
            valid = False
            extra.append(f"End tensor '{end.name}'")
        if strides.values is None:
            valid = False
            extra.append(f"Stride tensor '{strides.name}'")
        extra = ", ".join(extra)
        return valid, f"Op has non-constant tensors: {extra}"

    @staticmethod
    def constraint_ellipsis_mask(op):
        "ellipsis_mask must be 0"
        ellipsis = op.attrs["ellipsis_mask"]
        valid = ellipsis == 0
        return valid, f"Op has ellipsis mask as: {ellipsis}"

    @staticmethod
    def constraint_axis_masks(op):
        "new_axis_mask and shrink_axis_mask cannot both be set"
        new_axis = op.attrs["new_axis_mask"]
        shrink_axis = op.attrs["shrink_axis_mask"]
        valid = (new_axis == 0) or (shrink_axis == 0)
        return valid, f"Op has new_axis_mask={new_axis} and shrink_axis_mask={shrink_axis}"

    @staticmethod
    def constraint_slice_ranges(op):
        "Slice 'end' values must be greater than 'begin' values"
        ifm, begin, end, _ = op.inputs
        # Calculate offset begin/end
        offset_begin = get_slice_offsets(ifm.shape, begin, op.attrs["begin_mask"], is_begin=True)
        offset_end = get_slice_offsets(ifm.shape, end, op.attrs["end_mask"], is_begin=False)
        # Check "end - begin" doesn't result in any zero or negative elements
        valid = all((e - b) > 0 for b, e in zip(offset_begin, offset_end))
        return valid, f"Op has begin_values={begin.values} and end_values={end.values}"
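
    # Worked example (hypothetical values): for ifm.shape=[1, 8, 8, 3] with
    # begin=[0, 0, 0, 0], end=[1, 8, 8, 3] and zero begin/end masks, the
    # per-axis (end - begin) differences are 1, 8, 8 and 3, all positive, so
    # the constraint passes.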

    @staticmethod
    def constraint_matching_inputs_types(op):
        "Both Input data types must match"
        ifm_dtype = op.ifm.dtype
        ifm2_dtype = op.ifm2.dtype
        valid = ifm_dtype == ifm2_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ifm2_dtype={ifm2_dtype}"

    @staticmethod
    def constraint_matching_signed(op):
        "For IFM that are signed, OFM must also be signed"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        if ifm_dtype.type & BaseType.Signed:
            valid = bool(ofm_dtype.type & BaseType.Signed)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_unsigned_valid(op):
        "For IFM that are unsigned, OFM must either be the same type or int32"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        if ifm_dtype.type & BaseType.Unsigned:
            valid = (ifm_dtype == ofm_dtype) or (ofm_dtype == DataType.int32)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_input_8bit(op):
        "IFM must be int8 or uint8"
        ifm_dtype = op.ifm.dtype
        valid = (ifm_dtype == DataType.int8) or (ifm_dtype == DataType.uint8)
        return valid, f"Op has ifm_dtype={ifm_dtype}"

    @staticmethod
    def constraint_matching_either_shapes(op):
        "At least one Input's shape must match the OFM's shape"
        ifm_shape = op.ifm.shape
        ifm2_shape = op.ifm2.shape if op.ifm2 else None
        ofm_shape = op.ofm.shape
        valid = (ifm_shape == ofm_shape) or (ifm2_shape == ofm_shape)
        return valid, f"Op has ifm_shape={ifm_shape}, ifm2_shape={ifm2_shape} and ofm_shape={ofm_shape}"
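
    # Example: an elementwise Add with ifm_shape=[1, 8, 8, 3], a broadcast
    # ifm2_shape=[1] and ofm_shape=[1, 8, 8, 3] is accepted, since the first
    # input's shape matches the OFM shape.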

    @staticmethod
    def constraint_alpha_valid(op):
        "Alpha must not be negative"
        alpha = op.attrs["alpha"]
        valid = alpha >= 0
        return valid, f"Op has alpha={alpha}"

    @staticmethod
    def constraint_keep_dim_ifm_ofm(op):
        "The IFM and OFM must have the same number of dimensions if keep_num_dims is set to true"
        valid = True
        if op.attrs.get("keep_num_dims"):
            valid = len(op.ifm.shape) == len(op.ofm.shape)
        return valid, f"Op has ifm shape={op.ifm.shape} and ofm shape={op.ofm.shape}"

    @staticmethod
    def constraint_mean_input_dims(op):
        "Input tensor must be at least 2D"
        dims = len(op.inputs[0].shape)
        return 2 <= dims <= 4, f"Input is {dims}D"

    @staticmethod
    def constraint_mean_axis(op):
        "Axis indices must correspond to height and width axes"
        dims = len(op.inputs[0].shape)
        axis = int(op.inputs[1].values) if op.inputs[1].shape == [] else list(op.inputs[1].values)
        # constraint_mean_input_dims has already ensured 2 <= dims <= 4, so
        # `valid` is assigned on every reachable path
        if dims == 2 or dims == 3:
            valid = axis in (0, 1, [0], [1], [0, 1], [1, 0])
        else:  # dims == 4
            valid = axis in (1, 2, [1], [2], [1, 2], [2, 1])
        return valid, f"Axis is {axis}"

    @staticmethod
    def constraint_matching_in_out_quant(op):
        "Input and output quantisation must match."
        if not check_quantized_tens_scaling_equal(op.ifm, op.ofm):
            return False, "IFM and OFM quantisation parameters are not equal."
        return True, "IFM and OFM quantisation parameters match."


def tflite_semantic_checker(nng):
    semantic_checker = TFLiteSemantic()
    for sg in nng.subgraphs:
        for op in sg.get_all_ops():
            op.run_on_npu = semantic_checker.is_operator_semantic_valid(op)
    return nng
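

# Usage sketch (hypothetical driver code; `nng` is the internal network graph
# produced earlier in the compiler pipeline):
#
#     nng = tflite_semantic_checker(nng)
#     cpu_ops = [op for sg in nng.subgraphs for op in sg.get_all_ops() if not op.run_on_npu]
#
# Ops that fail any semantic check have run_on_npu cleared and fall back to the
# CPU reference implementation.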