# Copyright (C) 2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The TFLiteSemantic class, which is a collection of TensorFlow Lite model semantic checks.
from collections import defaultdict

import numpy as np

from .data_type import BaseType
from .data_type import DataType
from .numeric_util import is_integer
from .operation import get_slice_offsets
from .operation import Op
from .supported_operators_util import docstring_format_args
from .supported_operators_util import list_formatter
from .tensor import check_quantized_tens_scaling_equal
from .tflite_mapping import BUILTIN_OPERATOR_UNKNOWN
from .tflite_mapping import optype_to_builtintype


def _optype_formatter(op_list):
    # Convert internal op types to external names
    output = map(optype_to_builtintype, op_list)
    # Remove UNKNOWNs
    output = (x for x in output if x is not BUILTIN_OPERATOR_UNKNOWN)
    return list_formatter(output)


class TFLiteSemantic:
    # Categorised lists of operators
    convolution_ops = set(
        (
            Op.Conv2DBias,
            Op.Conv2D,
            Op.QuantizedConv2D,
        )
    )
    depthwise_convolution_ops = set((Op.DepthwiseConv2DBias,))
    transpose_convolution_ops = set((Op.Conv2DBackpropInput,))
    convolution_like_ops = convolution_ops | depthwise_convolution_ops | transpose_convolution_ops
    max_pooling_ops = Op.op_set(Op.is_maxpool_op)
    avg_pooling_ops = Op.op_set(Op.is_avgpool_op)
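    # ReduceSum is grouped with the pooling ops so it picks up the same stride checks (see __init__)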
    pooling_ops = set((Op.ReduceSum,)) | max_pooling_ops | avg_pooling_ops
    unary_elem_wise_main_ops = Op.op_set(Op.is_unary_elementwise_op)
    binary_elem_wise_min_max_ops = set(
        (
            Op.Minimum,
            Op.Maximum,
        )
    )
    binary_elem_wise_shift_ops = set(
        (
            Op.SHL,
            Op.SHR,
        )
    )
    binary_elem_wise_add_mul_sub = set(
        (
            Op.Add,
            Op.Mul,
            Op.Sub,
        )
    )
    binary_elem_wise_main_ops = binary_elem_wise_min_max_ops | binary_elem_wise_add_mul_sub | binary_elem_wise_shift_ops
    elem_wise_main_ops = binary_elem_wise_main_ops | unary_elem_wise_main_ops
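    # Ops for which scalar (shapeless) input tensors are accepted; see constraint_tens_input_scalar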
    shapeless_input_ops = binary_elem_wise_main_ops | set((Op.Split, Op.SplitV, Op.Mean, Op.ExpandDims, Op.Quantize))
    reshape_ops = set(
        (
            Op.Reshape,
            Op.QuantizedReshape,
            Op.Squeeze,
            Op.ExpandDims,
        )
    )

    def __init__(self):
        # Setup the generic constraints. Note: the order matters
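        # Each constraint is a callable that takes the op and returns a (valid, extra) tuple;
        # its docstring doubles as the user-facing reason printed when the check fails.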
        self.generic_constraints = []
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_no_dynamic)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_defined_shape)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_output_scalar)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_input_scalar)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_shape_size)

        self.generic_constraints.append(TFLiteSemantic.constraint_tens_quant_none_check)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_quant_scale)
        self.generic_constraints.append(TFLiteSemantic.constraint_quant_scale_inf)
        self.generic_constraints.append(TFLiteSemantic.constraint_none_const_tensors)

        # Setup specific constraints. Note: the order matters
        self.specific_constraints = defaultdict(list)

        # Conv-like checks:
        for op_type in TFLiteSemantic.convolution_like_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_stride_type)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_dilation_type)

        # Pooling checks:
        for op_type in TFLiteSemantic.pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_stride_type)
        # AVG pooling specific checks:
        for op_type in TFLiteSemantic.avg_pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_filter_type)
        # MAX pooling specific checks:
        for op_type in TFLiteSemantic.max_pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_filter_type)

        # Concat specific checks:
        for op_type in (Op.Concat, Op.ConcatTFLite):
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_axis_exists)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_axis_valid)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_dimensionality)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_valid_dimensions)

        # Element-wise checks:
        for op_type in TFLiteSemantic.elem_wise_main_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_either_shapes)
        # Unary specific checks:
        for op_type in TFLiteSemantic.unary_elem_wise_main_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Binary Min/Max specific checks:
        for op_type in TFLiteSemantic.binary_elem_wise_min_max_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Binary Add/Mul/Sub specific checks:
        for op_type in TFLiteSemantic.binary_elem_wise_add_mul_sub:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_inputs_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_signed)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_unsigned_valid)

        # Ops reshaping dimensions: Reshape, QuantizedReshape, Squeeze and ExpandDims
        for op_type in TFLiteSemantic.reshape_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_quant)

        # Softmax specific checks:
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_matching_shapes)
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_matching_in_out_types)
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_beta_value_range)

        # SplitV specific checks:
        self.specific_constraints[Op.SplitV].append(TFLiteSemantic.constraint_splitv_inferred)

        # StridedSlice specific checks:
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_stridedslice_input_count)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_stridedslice_inputs_const)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_ellipsis_mask)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_axis_masks)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_slice_ranges)

        # FullyConnected specific checks:
        self.specific_constraints[Op.FullyConnected].append(TFLiteSemantic.constraint_fc_output_2d)
        self.specific_constraints[Op.FullyConnected].append(TFLiteSemantic.constraint_keep_dim_ifm_ofm)

        # Pad specific checks:
        self.specific_constraints[Op.Pad].append(TFLiteSemantic.constraint_pad_input_count)
        self.specific_constraints[Op.Pad].append(TFLiteSemantic.constraint_pad_constant)

        # HardSwish specific checks:
        self.specific_constraints[Op.HardSwish].append(TFLiteSemantic.constraint_input_8bit)
        self.specific_constraints[Op.HardSwish].append(TFLiteSemantic.constraint_matching_in_out_types)

        # Mean specific checks:
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_input_8bit)
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_mean_input_dims)
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_mean_axis)

    def is_operator_semantic_valid(self, op):
        ext_type = optype_to_builtintype(op.type)

        if op.type in (Op.Placeholder, Op.SubgraphInput, Op.Const):
            return True

        # Filter the generic constraints, excluding any that do not apply to this op type
        filtered_generic_constraints = []

        for constraint in self.generic_constraints:
            # Skip constraints listed in the exclude list for this op type (defaults to no exclusions)
            if constraint not in self.get_generic_constraint_exclude_list().get(op.type, []):
                filtered_generic_constraints.append(constraint)

        for constraint in filtered_generic_constraints + self.specific_constraints[op.type]:
            valid, extra = constraint(op)
            if not valid:
                print(
                    f"Warning: Unsupported TensorFlow Lite semantics for {ext_type} '{op.name}'. Placing on CPU instead"
                )
                print(f" - {constraint.__doc__}")
                if extra:
                    print(f"   {extra}")
                return False

        return True

    @staticmethod
    def get_generic_constraint_exclude_list():

        # Not all generic constraints can be applied to each operator
        generic_constraints_exclude_list = {
            Op.Shape: [
                TFLiteSemantic.constraint_tens_quant_none_check,
            ],
            Op.Quantize: [
                TFLiteSemantic.constraint_tens_no_dynamic,
                TFLiteSemantic.constraint_tens_output_scalar,
            ],
        }
        return generic_constraints_exclude_list

    @staticmethod
    def constraint_none_const_tensors(op):
        "Constant tensors should not have NoneType-values"
        valid = True
        extra = ""
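        # Only tensors produced by a Const op are required to carry values here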
        for tens in filter(None, op.inputs):
            if len(tens.ops) > 0 and tens.ops[0].type == Op.Const and tens.values is None:
                valid = False
                extra = str(tens.name)
        return valid, f"Unexpected None value for constant tensor: {extra}"

    @staticmethod
    def constraint_tens_no_dynamic(op):
        "Input(s) and Output tensors must not be dynamic"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
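        # A tensor is treated as dynamic when it has an empty shape and no constant values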
        for tens in tensors:
            if (tens.shape == []) and (tens.values is None):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has dynamic tensor(s): {extra}"

    @staticmethod
    def constraint_tens_defined_shape(op):
        "Input(s) and Output tensors must have a defined shape"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if not tens.has_fully_defined_shape():
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_output_scalar(op):
        "Output tensors cannot be scalar"
        ofm = op.ofm
        valid = ofm.shape != []
        return valid, f"Output Tensor '{ofm.name}' is scalar"

    @classmethod
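    # docstring_format_args substitutes the formatted list of shapeless-input ops into the {} in the docstring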
    @docstring_format_args([_optype_formatter(shapeless_input_ops)])
    def constraint_tens_input_scalar(cls, op):
        "Scalar Input tensors are only valid for op type: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (op.type not in cls.shapeless_input_ops):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has scalar input tensor(s): {extra}"

    @staticmethod
    def constraint_tens_shape_size(op):
        "Input(s) and Output tensors must not be greater than 4D"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if len(tens.shape) > 4:
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_quant_none_check(op):
        "Input(s), Output and Weight tensors must have quantization parameters"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if tens.quantization is None:
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has tensors with missing quantization parameters: {extra}"

    @staticmethod
    def constraint_tens_quant_scale(op):
        "Input(s), Output and Weight tensors with quantization scales must be finite"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if (
                tens.quantization
                and tens.quantization.scale_f32 is not None
                and np.isinf(tens.quantization.scale_f32).any()
            ):
                valid = False
                extra.append(f"Tensor '{tens.name}' has quantization scale: {tens.quantization.scale_f32}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_fc_output_2d(op):
        """The output tensor(s) must have 2D shape"""
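        # get_shape_as_2d returns None when the IFM cannot be expressed as a 2D shape, which fails the check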
        valid = op.ifm.get_shape_as_2d(op.weights.shape[-2]) is not None
        extra = f"Op has non-2D output tensor '{op.ofm.name}'" if not valid else ""

        return valid, extra

    @staticmethod
    def constraint_stride_type(op):
        "Stride values for both width and height must be integer types"
        w, h = op.get_kernel_stride()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has stride WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_dilation_type(op):
        "Dilation factor values for both width and height must be integer types"
        w, h = op.get_kernel_dilation()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has dilation factor WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_quant_scale_inf(op):
        "Input and Output tensors must have quantization scales that fit within float32 precision"
        if op.ofm is not None and op.ofm.is_quantized():
            ofm_scale = op.ofm.quantization.scale_f32
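            # Reject scales smaller than the smallest positive normal float32 value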
            if np.any(ofm_scale < np.finfo(np.float32).tiny):
                return (
                    False,
                    f"The quantization scale of the output tensor is {ofm_scale}, "
                    + f"minimum supported is: {np.finfo(np.float32).tiny}",
                )
            if op.ifm is not None and op.ifm.is_quantized():
                ifm_scale = op.ifm.quantization.scale_f32
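                # The IFM-to-OFM rescale factor must remain finite in float32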
                if np.any(np.isinf(ifm_scale / ofm_scale)):
                    return (
                        False,
                        f"IFM scale divided by OFM scale is infinite, ifm_scale={ifm_scale} ofm_scale={ofm_scale}",
                    )
        return True, "Op's quantization is ok"

    @staticmethod
    def constraint_matching_in_out_types(op):
        "IFM and OFM data types must match"
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        valid = ifm_dtype == ofm_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_beta_value_range(op):
        "Beta value needs to be non-negative"
        beta = op.attrs.get("beta", 1.0)
        valid = beta >= 0
        return valid, f"Op has beta={beta}"

    @staticmethod
    def constraint_filter_type(op):
        "Kernel filter values for both width and height must be integer types"
        w = op.kernel.width
        h = op.kernel.height
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has kernel filter WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_matching_shapes(op):
        "IFM and OFM shapes must match"
        ifm_shape = op.ifm.shape
        ofm_shape = op.ofm.shape
        valid = ifm_shape == ofm_shape
        return valid, f"Op has ifm_shape={ifm_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_splitv_inferred(op):
        "Only one size is allowed to be inferred"
        sizes = op.inputs[1].values
        valid = np.count_nonzero(sizes == -1) <= 1
        return valid, f"Op has multiple inferred sizes (-1): {sizes}"

    @staticmethod
    def constraint_axis_exists(op):
        "Axis attribute must exist"
        axis = op.attrs.get("axis")
        valid = axis is not None
        return valid, f"Op has axis={axis}"

    @staticmethod
    def constraint_axis_valid(op):
        "Axis attribute must be in the range [0, <ofm_dimensions>)"
        dims = len(op.ofm.shape)
        axis = op.attrs["axis"]
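        # Normalise a negative axis to its non-negative equivalent before range checking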
        axis += dims if axis < 0 else 0
        valid = 0 <= axis < dims
        return valid, f"Op has ofm_dimensions={dims} and axis attribute is: {axis}"

    @staticmethod
    def constraint_matching_dimensionality(op):
        "All Input dimensionalities must match OFM dimensionality"
        valid = True
        extra = []
        ofm_dim = len(op.ofm.shape)
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            dim = len(tens.shape)
            if dim != ofm_dim:
                valid = False
                extra.append(f"Tensor '{tens.name}' has dimension: {dim}")
        extra = ", ".join(extra)
        return valid, f"Op has ofm_dimension={ofm_dim} and the list of mismatching inputs is: {extra}"

    @staticmethod
    def constraint_valid_dimensions(op):
        "All Input dimensions must match OFM dimension in all axes except the one defined by the axis attribute"
        valid = True
        extra = []
        ofm_shape = op.ofm.shape
        ofm_dim = len(ofm_shape)
        axis = op.attrs["axis"]
        axis += ofm_dim if axis < 0 else 0
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if any(tens.shape[dim] != ofm_shape[dim] for dim in range(ofm_dim) if dim != axis):
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        extra = ", ".join(extra)
        return valid, f"Op has axis={axis}, ofm_shape={ofm_shape} and the list of mismatching inputs is: {extra}"

    @staticmethod
    def constraint_stridedslice_input_count(op):
        "Exactly 4 Input tensors are required"
        inputs = len(op.inputs)
        valid = inputs == 4
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_pad_input_count(op):
        "Number of input tensors must be exactly 2"
        inputs = len(op.inputs)
        valid = inputs == 2
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_pad_constant(op):
        "The padding tensor must be constant"
        pad_tensor = op.inputs[1].values
        valid = pad_tensor is not None
        return valid, f"Op has non-constant padding tensor: {op.inputs[1].values}"

    @staticmethod
    def constraint_stridedslice_inputs_const(op):
        "Begin, End and Stride Input tensors must be constant"
        valid = True
        extra = []
        _, begin, end, strides = op.inputs
        if begin.values is None:
            valid = False
            extra.append(f"Begin tensor '{begin.name}'")
        if end.values is None:
            valid = False
            extra.append(f"End tensor '{end.name}'")
        if strides.values is None:
            valid = False
            extra.append(f"Stride tensor '{strides.name}'")
        extra = ", ".join(extra)
        return valid, f"Op has non-constant tensors: {extra}"

    @staticmethod
    def constraint_ellipsis_mask(op):
        "ellipsis_mask must be 0"
        ellipsis = op.attrs["ellipsis_mask"]
        valid = ellipsis == 0
        return valid, f"Op has ellipsis mask as: {ellipsis}"

    @staticmethod
    def constraint_axis_masks(op):
        "new_axis_mask and shrink_axis_mask cannot both be set"
        new_axis = op.attrs["new_axis_mask"]
        shrink_axis = op.attrs["shrink_axis_mask"]
        valid = (new_axis == 0) or (shrink_axis == 0)
        return valid, f"Op has new_axis_mask={new_axis} and shrink_axis_mask={shrink_axis}"

    @staticmethod
    def constraint_slice_ranges(op):
        "Slice 'end' values must be greater than 'begin' values"
        ifm, begin, end, _ = op.inputs
        # Calculate offset begin/end
        offset_begin = get_slice_offsets(ifm.shape, begin, op.attrs["begin_mask"], is_begin=True)
        offset_end = get_slice_offsets(ifm.shape, end, op.attrs["end_mask"], is_begin=False)
        # Check "end - begin" doesn't result in any zero or negative elements
        valid = all((e - b) > 0 for b, e in zip(offset_begin, offset_end))
        return valid, f"Op has begin_values={begin.values} and end_values={end.values}"

    @staticmethod
    def constraint_matching_inputs_types(op):
        "Both Input data types must match"
        ifm_dtype = op.ifm.dtype
        ifm2_dtype = op.ifm2.dtype
        valid = ifm_dtype == ifm2_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ifm2_dtype={ifm2_dtype}"

    @staticmethod
    def constraint_matching_signed(op):
        "For IFM that are signed, OFM must also be signed"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
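        # BaseType values are bit flags, so signedness is tested with a bitwise AND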
        if ifm_dtype.type & BaseType.Signed:
            valid = bool(ofm_dtype.type & BaseType.Signed)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_unsigned_valid(op):
        "For IFM that are unsigned, OFM must either be the same type or int32"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        if ifm_dtype.type & BaseType.Unsigned:
            valid = (ifm_dtype == ofm_dtype) or (ofm_dtype == DataType.int32)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_input_8bit(op):
        "IFM must be int8 or uint8"
        ifm_dtype = op.ifm.dtype
        valid = (ifm_dtype == DataType.int8) or (ifm_dtype == DataType.uint8)
        return valid, f"Op has ifm_dtype={ifm_dtype}"

    @staticmethod
    def constraint_matching_either_shapes(op):
        "At least one Input's shape must match the OFM's shape"
        ifm_shape = op.ifm.shape
        ifm2_shape = op.ifm2.shape if op.ifm2 else None
        ofm_shape = op.ofm.shape
        valid = (ifm_shape == ofm_shape) or (ifm2_shape == ofm_shape)
        return valid, f"Op has ifm_shape={ifm_shape}, ifm2_shape={ifm2_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_keep_dim_ifm_ofm(op):
        "The IFM and OFM must have the same number of dimensions if keep_num_dims is set to true"
        valid = True
        if op.attrs.get("keep_num_dims"):
            valid = len(op.ifm.shape) == len(op.ofm.shape)
        return valid, f"Op has ifm shape={op.ifm.shape} and ofm shape={op.ofm.shape}"

    @staticmethod
    def constraint_mean_input_dims(op):
        "Input tensor must be at least 2D"
        dims = len(op.inputs[0].shape)
        return 2 <= dims <= 4, f"Input is {dims}D"

    @staticmethod
    def constraint_mean_axis(op):
        "Axis indices must correspond to height and width axes"
        dims = len(op.inputs[0].shape)
        axis = int(op.inputs[1].values) if op.inputs[1].shape == [] else list(op.inputs[1].values)
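        # For 2D/3D input the height/width axes are 0 and 1; for 4D (NHWC) input they are 1 and 2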
        if dims == 2 or dims == 3:
            valid = axis in (0, 1, [0], [1], [0, 1], [1, 0])
        elif dims == 4:
            valid = axis in (1, 2, [1], [2], [1, 2], [2, 1])
        return valid, f"Axis is {axis}"

    @staticmethod
    def constraint_matching_in_out_quant(op):
        "Input and output quantisation must match."
        if not check_quantized_tens_scaling_equal(op.ifm, op.ofm):
            return False, "IFM and OFM quantisation parameters are not equal."
        return True, "IFM and OFM quantisation parameters match."


def tflite_semantic_checker(nng):
    semantic_checker = TFLiteSemantic()
    for sg in nng.subgraphs:
        for op in sg.get_all_ops():
            op.run_on_npu = semantic_checker.is_operator_semantic_valid(op)
    return nng