# Copyright (C) 2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The TFLiteSemantic class, which is a collection of TensorFlow Lite model semantic checks.
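#
# Typical usage (see tflite_semantic_checker() at the bottom of this file):
#
#     nng = tflite_semantic_checker(nng)
#
# This runs every check against every operator in the graph and sets
# op.run_on_npu accordingly.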
from collections import defaultdict

import numpy as np

from .data_type import BaseType
from .data_type import DataType
from .numeric_util import is_integer
from .operation import get_slice_offsets
from .operation import Op
from .supported_operators_util import docstring_format_args
from .supported_operators_util import list_formatter
from .tensor import check_quantized_tens_scaling_equal
from .tflite_mapping import BUILTIN_OPERATOR_UNKNOWN
from .tflite_mapping import optype_to_builtintype


def _optype_formatter(op_list):
    # Convert internal op types to external names
    output = map(optype_to_builtintype, op_list)
    # Remove UNKNOWNs
    output = (x for x in output if x is not BUILTIN_OPERATOR_UNKNOWN)
    return list_formatter(output)


class TFLiteSemantic:
    # Categorised lists of operators
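    # (used by __init__ below to attach the op-specific checks, and by the
    # warning-message formatting)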
    convolution_ops = set((Op.Conv2DBias, Op.Conv2D, Op.QuantizedConv2D,))
    depthwise_convolution_ops = set((Op.DepthwiseConv2DBias,))
    transpose_convolution_ops = set((Op.Conv2DBackpropInput,))
    convolution_like_ops = convolution_ops | depthwise_convolution_ops | transpose_convolution_ops
    max_pooling_ops = Op.op_set(Op.is_maxpool_op)
    avg_pooling_ops = Op.op_set(Op.is_avgpool_op)
    pooling_ops = set((Op.ReduceSum,)) | max_pooling_ops | avg_pooling_ops
    unary_elem_wise_main_ops = Op.op_set(Op.is_unary_elementwise_op)
    binary_elem_wise_min_max_ops = set((Op.Minimum, Op.Maximum,))
    binary_elem_wise_shift_ops = set((Op.SHL, Op.SHR,))
    binary_elem_wise_add_mul_sub = set((Op.Add, Op.Mul, Op.Sub,))
    binary_elem_wise_main_ops = binary_elem_wise_min_max_ops | binary_elem_wise_add_mul_sub | binary_elem_wise_shift_ops
    elem_wise_main_ops = binary_elem_wise_main_ops | unary_elem_wise_main_ops
    shapeless_input_ops = binary_elem_wise_main_ops | set((Op.Split, Op.SplitV, Op.Mean, Op.ExpandDims))
    reshape_ops = set((Op.Reshape, Op.QuantizedReshape, Op.Squeeze, Op.ExpandDims,))

    def __init__(self):
        # Set up the generic constraints. Note: the order matters
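        # (e.g. constraint_tens_quant_none_check must run before
        # constraint_tens_quant_scale, which dereferences tens.quantization)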
        self.generic_constraints = []
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_no_dynamic)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_defined_shape)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_output_scalar)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_input_scalar)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_shape_size)

        self.generic_constraints.append(TFLiteSemantic.constraint_tens_quant_none_check)
        self.generic_constraints.append(TFLiteSemantic.constraint_tens_quant_scale)
        self.generic_constraints.append(TFLiteSemantic.constraint_quant_scale_inf)

        # Set up the specific constraints. Note: the order matters
        self.specific_constraints = defaultdict(list)

        # Conv-like checks:
        for op_type in TFLiteSemantic.convolution_like_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_stride_type)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_dilation_type)

        # Pooling checks:
        for op_type in TFLiteSemantic.pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_stride_type)
        # AVG pooling specific checks:
        for op_type in TFLiteSemantic.avg_pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_filter_type)
        # MAX pooling specific checks:
        for op_type in TFLiteSemantic.max_pooling_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_filter_type)

        # Concat specific checks:
        for op_type in (Op.Concat, Op.ConcatTFLite):
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_axis_exists)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_axis_valid)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_dimensionality)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_valid_dimensions)

        # Element-wise checks:
        for op_type in TFLiteSemantic.elem_wise_main_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_either_shapes)
        # Unary specific checks:
        for op_type in TFLiteSemantic.unary_elem_wise_main_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Binary Min/Max specific checks:
        for op_type in TFLiteSemantic.binary_elem_wise_min_max_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Binary Add/Mul/Sub specific checks:
        for op_type in TFLiteSemantic.binary_elem_wise_add_mul_sub:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_inputs_types)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_signed)
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_unsigned_valid)

        # Ops reshaping dimensions: Reshape, Squeeze and ExpandDims
        for op_type in TFLiteSemantic.reshape_ops:
            self.specific_constraints[op_type].append(TFLiteSemantic.constraint_matching_in_out_quant)

        # Softmax specific checks:
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_matching_shapes)
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_matching_in_out_types)
        self.specific_constraints[Op.Softmax].append(TFLiteSemantic.constraint_beta_value_range)

        # SplitV specific checks:
        self.specific_constraints[Op.SplitV].append(TFLiteSemantic.constraint_splitv_inferred)

        # StridedSlice specific checks:
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_stridedslice_input_count)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_stridedslice_inputs_const)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_ellipsis_mask)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_axis_masks)
        self.specific_constraints[Op.StridedSlice].append(TFLiteSemantic.constraint_slice_ranges)

        # LeakyRelu specific checks:
        self.specific_constraints[Op.LeakyRelu].append(TFLiteSemantic.constraint_alpha_valid)

        # FullyConnected specific checks:
        self.specific_constraints[Op.FullyConnected].append(TFLiteSemantic.constraint_fc_output_2d)
        self.specific_constraints[Op.FullyConnected].append(TFLiteSemantic.constraint_keep_dim_ifm_ofm)

        # Pad specific checks:
        self.specific_constraints[Op.Pad].append(TFLiteSemantic.constraint_pad_input_count)
        self.specific_constraints[Op.Pad].append(TFLiteSemantic.constraint_pad_constant)

        # HardSwish specific checks:
        self.specific_constraints[Op.HardSwish].append(TFLiteSemantic.constraint_input_8bit)
        self.specific_constraints[Op.HardSwish].append(TFLiteSemantic.constraint_matching_in_out_types)
        # Mean specific checks:
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_input_8bit)
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_mean_input_dims)
        self.specific_constraints[Op.Mean].append(TFLiteSemantic.constraint_mean_axis)

    def is_operator_semantic_valid(self, op):
        ext_type = optype_to_builtintype(op.type)

        if op.type in (Op.Placeholder, Op.SubgraphInput, Op.Const):
            return True

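        # Generic checks run first, then the op-type-specific ones; the first
        # failing constraint rejects the op and it is placed on the CPU instead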
        for constraint in self.generic_constraints + self.specific_constraints[op.type]:
            valid, extra = constraint(op)
            if not valid:
                print(
                    f"Warning: unsupported TensorFlow Lite semantics for {ext_type} '{op.name}'. Placing on CPU instead"
                )
                print(f" - {constraint.__doc__}")
                if extra:
                    print(f"   {extra}")
                return False

        return True

    @staticmethod
    def constraint_tens_no_dynamic(op):
        "Input(s) and Output tensors must not be dynamic"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (tens.values is None):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has dynamic tensor(s): {extra}"

    @staticmethod
    def constraint_tens_defined_shape(op):
        "Input(s) and Output tensors must have a defined shape"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if not tens.has_fully_defined_shape():
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_output_scalar(op):
        "Output tensors cannot be scalar"
        ofm = op.ofm
        valid = ofm.shape != []
        return valid, f"Output Tensor '{ofm.name}' is scalar"

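    # Note: the docstring_format_args decorator below substitutes the {} in the
    # docstring, so the printed warning lists the permitted op types by name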
    @classmethod
    @docstring_format_args([_optype_formatter(shapeless_input_ops)])
    def constraint_tens_input_scalar(cls, op):
        "Scalar Input tensors are only valid for op type: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (op.type not in cls.shapeless_input_ops):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has scalar input tensor(s): {extra}"

    @staticmethod
    def constraint_tens_shape_size(op):
        "Input(s) and Output tensors must not be greater than 4D"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if len(tens.shape) > 4:
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_quant_none_check(op):
        "Input(s), Output and Weight tensors must have quantization parameters"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if tens.quantization is None:
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has tensors with missing quantization parameters: {extra}"

    @staticmethod
    def constraint_tens_quant_scale(op):
        "Input(s), Output and Weight tensors with quantization scales must be finite"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if (tens.quantization.scale_f32 is not None) and np.isinf(tens.quantization.scale_f32).any():
                valid = False
                extra.append(f"Tensor '{tens.name}' has quantization scale: {tens.quantization.scale_f32}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_fc_output_2d(op):
        "The output tensor(s) must have 2D shape"
        valid = True
        extra = []
        for tens in op.outputs:
            if len(tens.shape) != 2:
                valid = False
                extra.append(f"Tensor '{tens.name}' is {len(tens.shape)}D")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_stride_type(op):
        "Stride values for both width and height must be integer types"
        w, h = op.get_kernel_stride()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has stride WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_dilation_type(op):
        "Dilation factor values for both width and height must be integer types"
        w, h = op.get_kernel_dilation()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has dilation factor WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_quant_scale_inf(op):
        "Input and Output tensors must have quantization scales that fit within float32 precision"
        if op.ofm is not None and op.ofm.is_quantized():
            ofm_scale = op.ofm.quantization.scale_f32
            if ofm_scale < np.finfo(np.float32).tiny:
                return (
                    False,
                    f"The quantization scale of the output tensor is {ofm_scale}, "
                    + f"minimum supported is: {np.finfo(np.float32).tiny}",
                )
            if op.ifm is not None and op.ifm.is_quantized():
                ifm_scale = op.ifm.quantization.scale_f32
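                # The rescale ratio ifm_scale / ofm_scale must itself be
                # representable in float32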
                if np.isinf(ifm_scale / ofm_scale):
                    return (
                        False,
                        f"IFM scale divided by OFM scale is infinite, ifm_scale={ifm_scale} ofm_scale={ofm_scale}",
                    )
        return True, "Op's quantization is ok"

    @staticmethod
    def constraint_matching_in_out_types(op):
        "IFM and OFM data types must match"
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        valid = ifm_dtype == ofm_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_beta_value_range(op):
        "Beta value needs to be non-negative"
        beta = op.attrs.get("beta", 1.0)
        valid = beta >= 0
        return valid, f"Op has beta={beta}"

    @staticmethod
    def constraint_filter_type(op):
        "Kernel filter values for both width and height must be integer types"
        w = op.kernel.width
        h = op.kernel.height
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has kernel filter WxH as: {repr(w)}x{repr(h)}"

    @staticmethod
    def constraint_matching_shapes(op):
        "IFM and OFM shapes must match"
        ifm_shape = op.ifm.shape
        ofm_shape = op.ofm.shape
        valid = ifm_shape == ofm_shape
        return valid, f"Op has ifm_shape={ifm_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_splitv_inferred(op):
        "Only one size is allowed to be inferred"
        sizes = op.inputs[1].values
        valid = np.count_nonzero(sizes == -1) <= 1
        return valid, f"Op has multiple inferred sizes (-1): {sizes}"

    @staticmethod
    def constraint_axis_exists(op):
        "Axis attribute must exist"
        axis = op.attrs.get("axis")
        valid = axis is not None
        return valid, f"Op has axis={axis}"

    @staticmethod
    def constraint_axis_valid(op):
        "Axis attribute must be in the range [0, <ofm_dimensions>)"
        dims = len(op.ofm.shape)
        axis = op.attrs["axis"]
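        # Normalise a negative axis to its non-negative equivalent before the range check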
        axis += dims if axis < 0 else 0
        valid = 0 <= axis < dims
        return valid, f"Op has ofm_dimensions={dims} and axis attribute is: {axis}"

    @staticmethod
    def constraint_matching_dimensionality(op):
        "All Input dimensionalities must match OFM dimensionality"
        valid = True
        extra = []
        ofm_dim = len(op.ofm.shape)
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            dim = len(tens.shape)
            if dim != ofm_dim:
                valid = False
                extra.append(f"Tensor '{tens.name}' has dimension: {dim}")
        extra = ", ".join(extra)
        return valid, f"Op has ofm_dimension={ofm_dim} and the list of mismatching inputs is: {extra}"

    @staticmethod
    def constraint_valid_dimensions(op):
        "All Input dimensions must match OFM dimension in all axes except the one defined by the axis attribute"
        valid = True
        extra = []
        ofm_shape = op.ofm.shape
        ofm_dim = len(ofm_shape)
        axis = op.attrs["axis"]
        axis += ofm_dim if axis < 0 else 0
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if any(tens.shape[dim] != ofm_shape[dim] for dim in range(ofm_dim) if dim != axis):
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        extra = ", ".join(extra)
        return valid, f"Op has axis={axis}, ofm_shape={ofm_shape} and the list of mismatching inputs is: {extra}"

    @staticmethod
    def constraint_stridedslice_input_count(op):
        "Exactly 4 Input tensors are required"
        inputs = len(op.inputs)
        valid = inputs == 4
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_pad_input_count(op):
        "Number of input tensors must be exactly 2"
        inputs = len(op.inputs)
        valid = inputs == 2
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_pad_constant(op):
        "The padding tensor must be constant"
        pad_tensor = op.inputs[1].values
        valid = pad_tensor is not None
        return valid, f"Op has non-constant padding tensor: {op.inputs[1].values}"

    @staticmethod
    def constraint_stridedslice_inputs_const(op):
        "Begin, End and Stride Input tensors must be constant"
        valid = True
        extra = []
        _, begin, end, strides = op.inputs
        if begin.values is None:
            valid = False
            extra.append(f"Begin tensor '{begin.name}'")
        if end.values is None:
            valid = False
            extra.append(f"End tensor '{end.name}'")
        if strides.values is None:
            valid = False
            extra.append(f"Stride tensor '{strides.name}'")
        extra = ", ".join(extra)
        return valid, f"Op has non-constant tensors: {extra}"

    @staticmethod
    def constraint_ellipsis_mask(op):
        "ellipsis_mask must be 0"
        ellipsis = op.attrs["ellipsis_mask"]
        valid = ellipsis == 0
        return valid, f"Op has ellipsis mask as: {ellipsis}"

    @staticmethod
    def constraint_axis_masks(op):
        "new_axis_mask and shrink_axis_mask cannot both be set"
        new_axis = op.attrs["new_axis_mask"]
        shrink_axis = op.attrs["shrink_axis_mask"]
        valid = (new_axis == 0) or (shrink_axis == 0)
        return valid, f"Op has new_axis_mask={new_axis} and shrink_axis_mask={shrink_axis}"

    @staticmethod
    def constraint_slice_ranges(op):
        "Slice 'end' values must be greater than 'begin' values"
        ifm, begin, end, _ = op.inputs
        # Calculate offset begin/end
        offset_begin = get_slice_offsets(ifm.shape, begin, op.attrs["begin_mask"], is_begin=True)
        offset_end = get_slice_offsets(ifm.shape, end, op.attrs["end_mask"], is_begin=False)
        # Check "end - begin" doesn't result in any zero or negative elements
        valid = all((e - b) > 0 for b, e in zip(offset_begin, offset_end))
        return valid, f"Op has begin_values={begin.values} and end_values={end.values}"

    @staticmethod
    def constraint_matching_inputs_types(op):
        "Both Input data types must match"
        ifm_dtype = op.ifm.dtype
        ifm2_dtype = op.ifm2.dtype
        valid = ifm_dtype == ifm2_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ifm2_dtype={ifm2_dtype}"

    @staticmethod
    def constraint_matching_signed(op):
        "For IFM that are signed, OFM must also be signed"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
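        # BaseType is a flag enum, so the bitwise AND tests the Signed property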
        if ifm_dtype.type & BaseType.Signed:
            valid = bool(ofm_dtype.type & BaseType.Signed)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_unsigned_valid(op):
        "For IFM that are unsigned, OFM must either be the same type or int32"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        if ifm_dtype.type & BaseType.Unsigned:
            valid = (ifm_dtype == ofm_dtype) or (ofm_dtype == DataType.int32)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_input_8bit(op):
        "IFM must be int8 or uint8"
        ifm_dtype = op.ifm.dtype
        valid = (ifm_dtype == DataType.int8) or (ifm_dtype == DataType.uint8)
        return valid, f"Op has ifm_dtype={ifm_dtype}"

    @staticmethod
    def constraint_matching_either_shapes(op):
        "At least one Input's shape must match the OFM's shape"
        ifm_shape = op.ifm.shape
        ifm2_shape = op.ifm2.shape if op.ifm2 else None
        ofm_shape = op.ofm.shape
        valid = (ifm_shape == ofm_shape) or (ifm2_shape == ofm_shape)
        return valid, f"Op has ifm_shape={ifm_shape}, ifm2_shape={ifm2_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_alpha_valid(op):
        "Alpha must not be negative"
        alpha = op.attrs["alpha"]
        valid = alpha >= 0
        return valid, f"Op has alpha={alpha}"

    @staticmethod
    def constraint_keep_dim_ifm_ofm(op):
        "The IFM and OFM must have the same number of dimensions if keep_num_dims is set to true"
        valid = True
        if op.attrs.get("keep_num_dims"):
            valid = len(op.ifm.shape) == len(op.ofm.shape)
        return valid, f"Op has ifm shape={op.ifm.shape} and ofm shape={op.ofm.shape}"

    @staticmethod
    def constraint_mean_input_dims(op):
        "Input tensor must be at least 2D and at most 4D"
        dims = len(op.inputs[0].shape)
        return 2 <= dims <= 4, f"Input is {dims}D"

    @staticmethod
    def constraint_mean_axis(op):
        "Axis indices must correspond to height and width axes"
        dims = len(op.inputs[0].shape)
        axis = int(op.inputs[1].values) if op.inputs[1].shape == [] else list(op.inputs[1].values)
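        # constraint_mean_input_dims has already run, so dims is guaranteed to
        # be in [2, 4] and exactly one of the branches below is taken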
        if dims == 2 or dims == 3:
            valid = axis in (0, 1, [0], [1], [0, 1], [1, 0])
        elif dims == 4:
            valid = axis in (1, 2, [1], [2], [1, 2], [2, 1])
        return valid, f"Axis is {axis}"

    @staticmethod
    def constraint_matching_in_out_quant(op):
        "Input and output quantization must match"
        if not check_quantized_tens_scaling_equal(op.ifm, op.ofm):
            return False, "IFM and OFM quantization parameters are not equal"
        return True, "IFM and OFM quantization parameters match"


def tflite_semantic_checker(nng):
    semantic_checker = TFLiteSemantic()
    for sg in nng.subgraphs:
        for op in sg.get_all_ops():
            op.run_on_npu = semantic_checker.is_operator_semantic_valid(op)
    return nng
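

# Example of the warning printed when an op fails a check; the op name and data
# types below are illustrative only:
#
#   Warning: unsupported TensorFlow Lite semantics for SOFTMAX 'softmax_1'. Placing on CPU instead
#    - IFM and OFM data types must match
#      Op has ifm_dtype=int8 and ofm_dtype=uint8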