# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The SupportedOperators class which is a collection of all supported operators and parameter checks.
from collections import defaultdict

import numpy as np

from .data_type import BaseType
from .data_type import DataType
from .numeric_util import is_integer
from .operation import get_slice_offsets
from .operation import Op
from .operation import Padding
from .tensor import check_quantized_tens_scaling_equal
from .tflite_mapping import BUILTIN_OPERATOR_UNKNOWN
from .tflite_mapping import optype_to_builtintype


# Custom decorator function to allow formatting docstrings containing "{}"
def docstring_format_args(args):
    def docstring(func):
        func.__doc__ = func.__doc__.format(*args)
        return func

    return docstring
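
# Usage pattern (taken from the constraint methods below): a class attribute
# drives both the check and the human-readable docstring printed on failure:
#
#     @classmethod
#     @docstring_format_args(tens_dim_range)
#     def constraint_tens_dimension(cls, op):
#         "Tensor dimensions must be in the range [{}, {}]"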


def _list_formatter(arg):
    # Order and join into a string representation
    return ", ".join(sorted(map(str, arg)))


def _optype_formatter(op_list):
    # Convert internal op types to external names
    output = map(optype_to_builtintype, op_list)
    # Remove UNKNOWNs
    output = (x for x in output if x is not BUILTIN_OPERATOR_UNKNOWN)
    return _list_formatter(output)
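
# Illustrative example (the exact strings come from tflite_mapping):
# _optype_formatter([Op.Conv2D, Op.FullyConnected]) would produce something
# like "CONV_2D, FULLY_CONNECTED", sorted, with unknown builtins dropped.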


class SupportedOperators:
    # Categorised lists of supported operators
    npu_pre_ops = set((Op.SplitSliceRead,))
    convolution_ops = set((Op.Conv2DBias, Op.Conv2D, Op.QuantizedConv2D,))
    depthwise_convolution_ops = set((Op.DepthwiseConv2DBias,))
    transpose_convolution_ops = set((Op.Conv2DBackpropInput,))
    convolution_like_ops = convolution_ops | depthwise_convolution_ops | transpose_convolution_ops
    max_pooling_ops = Op.op_set(Op.is_maxpool_op)
    avg_pooling_ops = Op.op_set(Op.is_avgpool_op)
    pooling_ops = set((Op.ReduceSum,)) | max_pooling_ops | avg_pooling_ops
    resizing_ops = set((Op.ResizeBilinear,))
    fc_vector_products = set((Op.QuantizedMatMul, Op.MatMul, Op.FullyConnected,))
    mac_main_ops = (
        # RNN/LSTM/GRU
        set((Op.BlockLSTM,))
        # conv/depthwiseconv/transposeconv
        | convolution_like_ops
        # pooling
        | pooling_ops
        # resizing/upscaling
        | resizing_ops
        # FC layers
        | fc_vector_products
    )
    unary_elem_wise_main_ops = Op.op_set(Op.is_unary_elementwise_op)
    binary_elem_wise_min_max_ops = set((Op.Minimum, Op.Maximum,))
    binary_elem_wise_shift_ops = set((Op.SHL, Op.SHR,))
    binary_elem_wise_add_mul_sub = set((Op.Add, Op.Mul, Op.Sub,))
    binary_elem_wise_main_ops = binary_elem_wise_min_max_ops | binary_elem_wise_add_mul_sub | binary_elem_wise_shift_ops
    elem_wise_main_ops = binary_elem_wise_main_ops | unary_elem_wise_main_ops
    pad_ops = set((Op.Pad,))
    supported_int32_tensor_ops = (
        set((Op.ReduceSum, Op.CLZ,)) | binary_elem_wise_add_mul_sub | binary_elem_wise_shift_ops
    )
    relu_ops = Op.op_set(Op.is_relu_op)
    activation_ops = relu_ops | set((Op.Tanh, Op.Sigmoid, Op.Softmax,))
    npu_post_ops = (
        # activation functions
        activation_ops
        # concatenation write direction
        | set((Op.ConcatSliceWrite,))
        # Quantization
        | set((Op.Quantize,))
    )
    split_ops = set((Op.Split, Op.SplitV, Op.StridedSlice, Op.Slice, Op.UnpackReshaped, Op.Unpack,))
    concat_ops = set((Op.Concat, Op.ConcatTFLite, Op.PackReshaped, Op.Pack,))
    memory_only_ops = set((Op.Squeeze, Op.Reshape, Op.QuantizedReshape,)) | concat_ops | split_ops
    shapeless_input_ops = binary_elem_wise_main_ops | set((Op.Split, Op.SplitV,))
    per_axis_quant_ops = convolution_like_ops  # per-axis/channel quantization only currently supported for conv ops
    supported_fused_activations = relu_ops | set((Op.Tanh, Op.Sigmoid, Op.LUT,))
    supported_operators = npu_pre_ops | mac_main_ops | elem_wise_main_ops | pad_ops | npu_post_ops | memory_only_ops
    # Supported data types
    supported_op_dtypes = set((DataType.uint8, DataType.int8, DataType.int16, DataType.int32))
    supported_bias_dtypes = set((DataType.int32, DataType.int64))
    supported_pad_dtypes = set((DataType.int32, DataType.int64))
    # Defined ranges for allowed values:
    tens_dim_range = (1, 65535)
    stride_range = (1, 3)
    dilation_range = (1, 2)
    dilated_height_range = (1, 64)
    dilated_product_range = (1, 64 * 64)
    weights_limit = 127 * 65536
    filter_range = (1, 8)
    filter_height_range = (1, 256)
    filter_product_range = (1, 256 * 256)
    # Supported consumers
    supported_pad_consumers = convolution_ops | depthwise_convolution_ops

    def __init__(self):
        # Setup the generic constraints. Note: the order matters
        self.generic_constraints = []
        self.generic_constraints.append(SupportedOperators.constraint_tens_no_dynamic)
        self.generic_constraints.append(SupportedOperators.constraint_tens_defined_shape)
        self.generic_constraints.append(SupportedOperators.constraint_tens_output_scalar)
        self.generic_constraints.append(SupportedOperators.constraint_tens_input_scalar)
        self.generic_constraints.append(SupportedOperators.constraint_tens_shape_size)
        self.generic_constraints.append(SupportedOperators.constraint_tens_dtype)
        self.generic_constraints.append(SupportedOperators.constraint_tens_int32_ops)
        self.generic_constraints.append(SupportedOperators.constraint_tens_dimension)
        self.generic_constraints.append(SupportedOperators.constraint_tens_quant_none_check)
        self.generic_constraints.append(SupportedOperators.constraint_tens_quant_scale)
        self.generic_constraints.append(SupportedOperators.constraint_tens_quant_per_axis)
        self.generic_constraints.append(SupportedOperators.constraint_faf)
        self.generic_constraints.append(SupportedOperators.constraint_quant_scale_inf)

        # Setup specific constraints. Note: the order matters
        self.specific_constraints = defaultdict(list)

        # Conv-like checks:
        for op_type in SupportedOperators.convolution_like_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_stride_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_stride_range)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_dilation_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_dilation_range)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_dilated_height_range)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_dilated_product_range)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_weights_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_weights_const)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_weights_limit)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_bias_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_bias_40bit)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_batch_size)
        # Depthwise Conv specific checks:
        for op_type in SupportedOperators.depthwise_convolution_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_depth_multiplier)
        # Transpose Conv specific checks:
        for op_type in SupportedOperators.transpose_convolution_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_tconv_stride)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_tconv_same)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_tconv_valid)

        # Pooling checks:
        for op_type in SupportedOperators.pooling_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_batch_size)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_stride_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_stride_range)
        # AVG pooling specific checks:
        for op_type in SupportedOperators.avg_pooling_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_range)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_height_range_valid_pad)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_product_range_valid_pad)
        # MAX pooling specific checks:
        for op_type in SupportedOperators.max_pooling_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_height_range)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_product_range)

        # Resizing specific checks:
        for op_type in SupportedOperators.resizing_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_resize)

        # Vector Product specific checks:
        for op_type in SupportedOperators.fc_vector_products:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_weights_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_weights_const)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_bias_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_bias_40bit)

        # Concat specific checks:
        for op_type in (Op.Concat, Op.ConcatTFLite):
            self.specific_constraints[op_type].append(SupportedOperators.constraint_axis_exists)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_axis_valid)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_dimensionality)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_valid_dimensions)

        # Element-wise checks:
        for op_type in SupportedOperators.elem_wise_main_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_elemwise_batch_size)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_either_shapes)
        # Unary specific checks:
        for op_type in SupportedOperators.unary_elem_wise_main_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_in_out_types)
        # Binary Min/Max specific checks:
        for op_type in SupportedOperators.binary_elem_wise_min_max_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_quantization_parameters)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_broadcast_shapes)
        # Binary Add/Mul/Sub specific checks:
        for op_type in SupportedOperators.binary_elem_wise_add_mul_sub:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_inputs_types)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_signed)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_unsigned_valid)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_broadcast_shapes)
        # Binary Shift specific checks:
        for op_type in SupportedOperators.binary_elem_wise_shift_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_inputs_int32)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_broadcast_shapes)

        # SHL specific checks:
        self.specific_constraints[Op.SHL].append(SupportedOperators.constraint_output_int32)

        # CLZ specific checks:
        self.specific_constraints[Op.CLZ].append(SupportedOperators.constraint_output_int32)

        # Softmax specific checks:
        self.specific_constraints[Op.Softmax].append(SupportedOperators.constraint_matching_shapes)
        self.specific_constraints[Op.Softmax].append(SupportedOperators.constraint_matching_in_out_types)
        self.specific_constraints[Op.Softmax].append(SupportedOperators.constraint_beta_value_range)

        # SplitV specific checks:
        self.specific_constraints[Op.SplitV].append(SupportedOperators.constraint_splitv_inferred)

        # StridedSlice specific checks:
        self.specific_constraints[Op.StridedSlice].append(SupportedOperators.constraint_stridedslice_input_count)
        self.specific_constraints[Op.StridedSlice].append(SupportedOperators.constraint_stridedslice_inputs_const)
        self.specific_constraints[Op.StridedSlice].append(SupportedOperators.constraint_stridedslice_stride_values)
        self.specific_constraints[Op.StridedSlice].append(SupportedOperators.constraint_ellipsis_mask)
        self.specific_constraints[Op.StridedSlice].append(SupportedOperators.constraint_axis_masks)
        self.specific_constraints[Op.StridedSlice].append(SupportedOperators.constraint_slice_ranges)

        # LeakyRelu specific checks:
        self.specific_constraints[Op.LeakyRelu].append(SupportedOperators.constraint_alpha_valid)

        # FullyConnected specific checks:
        self.specific_constraints[Op.FullyConnected].append(SupportedOperators.constraint_fc_output_2d)

        # Pad specific checks:
        self.specific_constraints[Op.Pad].append(SupportedOperators.constraint_matching_in_out_types)
        self.specific_constraints[Op.Pad].append(SupportedOperators.constraint_matching_quantization_parameters)
        self.specific_constraints[Op.Pad].append(SupportedOperators.constraint_pad_input_count)
        self.specific_constraints[Op.Pad].append(SupportedOperators.constraint_pad_shape)
        self.specific_constraints[Op.Pad].append(SupportedOperators.constraint_padding_dimensions)
        self.specific_constraints[Op.Pad].append(SupportedOperators.constraint_pad_type)
        self.specific_constraints[Op.Pad].append(SupportedOperators.constraint_pad_constant)
        self.specific_constraints[Op.Pad].append(SupportedOperators.constraint_pad_ofm)

    def is_operator_supported(self, op):
        ext_type = optype_to_builtintype(op.type)
        if op.type not in SupportedOperators.supported_operators:
            if op.type not in (Op.Placeholder, Op.SubgraphInput, Op.Const):
                print(f"Info: {ext_type} '{op.name}' is a CPU only op")
            return False

        for constraint in self.generic_constraints + self.specific_constraints[op.type]:
            valid, extra = constraint(op)
            if not valid:
                print(f"Warning: {ext_type} '{op.name}' is not supported on the NPU. Placing on CPU instead")
                print(f" - {constraint.__doc__}")
                if extra:
                    print(f"   {extra}")
                return False

        return True

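    # Typical use, as a sketch (the surrounding graph iteration is assumed and
    # not part of this class):
    #
    #     supported = SupportedOperators()
    #     npu_ops = [op for op in ops if supported.is_operator_supported(op)]
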
    @staticmethod
    def constraint_tens_no_dynamic(op):
        "Input(s) and Output tensors must not be dynamic"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (tens.values is None):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has dynamic tensor(s): {extra}"

    @staticmethod
    def constraint_tens_defined_shape(op):
        "Input(s) and Output tensors must have a defined shape"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if not tens.has_fully_defined_shape():
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_output_scalar(op):
        "Output tensors cannot be scalar"
        ofm = op.ofm
        valid = ofm.shape != []
        return valid, f"Output Tensor '{ofm.name}' is scalar"

    @classmethod
    @docstring_format_args([_optype_formatter(shapeless_input_ops)])
    def constraint_tens_input_scalar(cls, op):
        "Scalar Input tensors are only valid for op type: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (op.type not in cls.shapeless_input_ops):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has scalar input tensor(s): {extra}"

    @staticmethod
    def constraint_tens_shape_size(op):
        "Input(s) and Output tensors must not be greater than 4D"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if len(tens.shape) > 4:
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @classmethod
    @docstring_format_args([_list_formatter(supported_op_dtypes)])
    def constraint_tens_dtype(cls, op):
        "Tensors must be of type: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        if not tensors:
            tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if tens.dtype not in cls.supported_op_dtypes:
                valid = False
                extra.append(f"Tensor '{tens.name}' has data type: {tens.dtype}")
        return valid, ", ".join(extra)

    @classmethod
    @docstring_format_args([_optype_formatter(supported_int32_tensor_ops)])
    def constraint_tens_int32_ops(cls, op):
        "Tensors which are int32 are only valid when op type is: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        if not tensors:
            tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if (tens.dtype == DataType.int32) and (op.type not in cls.supported_int32_tensor_ops):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has int32 tensor(s): {extra}"

    @classmethod
    @docstring_format_args(tens_dim_range)
    def constraint_tens_dimension(cls, op):
        "Tensor dimensions must be in the range [{}, {}]"
        tens_min, tens_max = cls.tens_dim_range
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        if not tensors:
            tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if not all(tens_min <= dim <= tens_max for dim in tens.shape):
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_quant_none_check(op):
        "Input(s), Output and Weight tensors must have quantization parameters"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if tens.quantization is None:
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has tensors with missing quantization parameters: {extra}"

    @staticmethod
    def constraint_tens_quant_scale(op):
        "Input(s), Output and Weight tensors with quantization scales must be finite"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if (tens.quantization.scale_f32 is not None) and np.isinf(tens.quantization.scale_f32).any():
                valid = False
                extra.append(f"Tensor '{tens.name}' has quantization scale: {tens.quantization.scale_f32}")
        return valid, ", ".join(extra)

    @classmethod
    @docstring_format_args([_optype_formatter(per_axis_quant_ops)])
    def constraint_tens_quant_per_axis(cls, op):
        "Per-axis quantization is only supported for the following op types: {}"
        valid = True
        extra = []
        if op.type not in cls.per_axis_quant_ops:
            tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
            for tens in tensors:
                if tens.quantization.is_per_axis():
                    valid = False
                    extra.append(tens.name)
        return valid, "The following tensor(s) have per-axis quantization parameters: " + ", ".join(extra)

    @staticmethod
    def constraint_fc_output_2d(op):
        "The output tensor(s) must have 2D shape"
        valid = True
        extra = []
        for tens in op.outputs:
            if len(tens.shape) != 2:
                valid = False
                extra.append(f"Tensor '{tens.name}' is {len(tens.shape)}D")
        return valid, ", ".join(extra)

    @classmethod
    @docstring_format_args([_optype_formatter(supported_fused_activations)])
    def constraint_faf(cls, op):
        "The fused activation function (if present) must be one of type: {}"
        if op.activation is None:
            res = True, "Op has no fused activation function"
        else:
            faf = op.activation.op_type
            valid = faf in cls.supported_fused_activations
            res = valid, f"Op has its fused activation function as: {faf}"
        return res

    @staticmethod
    def constraint_stride_type(op):
        "Stride values for both width and height must be integer types"
        w, h = op.get_kernel_stride()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has stride WxH as: {repr(w)}x{repr(h)}"

    @classmethod
    @docstring_format_args(stride_range)
    def constraint_stride_range(cls, op):
        "Stride values for both width and height must be in the range [{}, {}]"
        w, h = op.get_kernel_stride()
        stride_min, stride_max = cls.stride_range
        valid = (stride_min <= w <= stride_max) and (stride_min <= h <= stride_max)
        return valid, f"Op has stride WxH as: {w}x{h}"

    @staticmethod
    def constraint_dilation_type(op):
        "Dilation factor values for both width and height must be integer types"
        w, h = op.get_kernel_dilation()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has dilation factor WxH as: {repr(w)}x{repr(h)}"

    @classmethod
    @docstring_format_args(dilation_range)
    def constraint_dilation_range(cls, op):
        "Dilation factor values for both width and height must be in the range [{}, {}]"
        w, h = op.get_kernel_dilation()
        dilation_min, dilation_max = cls.dilation_range
        valid = (dilation_min <= w <= dilation_max) and (dilation_min <= h <= dilation_max)
        return valid, f"Op has dilation factor WxH as: {w}x{h}"

    @classmethod
    @docstring_format_args(dilated_height_range)
    def constraint_dilated_height_range(cls, op):
        "Dilated kernel height must be in the range [{}, {}]"
        h = op.kernel.area_height()
        dilated_height_min, dilated_height_max = cls.dilated_height_range
        valid = dilated_height_min <= h <= dilated_height_max
        return valid, f"Op has dilated kernel height as: {h}"

    @classmethod
    @docstring_format_args(dilated_product_range)
    def constraint_dilated_product_range(cls, op):
        "Product of dilated kernel width and height must be in the range [{}, {}]"
        product = op.kernel.area_width() * op.kernel.area_height()
        dilated_product_min, dilated_product_max = cls.dilated_product_range
        valid = dilated_product_min <= product <= dilated_product_max
        return valid, f"Op has product of dilated kernel width and height as: {product}"

    @staticmethod
    def constraint_weights_type(op):
        "Weight tensor must be 8-bit"
        weights = op.weights
        valid = weights.element_size() == 1
        return valid, f"Tensor '{weights.name}' is {int(weights.element_size() * 8)}-bit"

    @staticmethod
    def constraint_weights_const(op):
        "Weight tensor must be constant"
        weights = op.weights
        valid = weights.values is not None
        return valid, f"Tensor '{weights.name}' has non-constant values"

    @classmethod
    @docstring_format_args([weights_limit])
    def constraint_weights_limit(cls, op):
        "The sum of the weights cannot exceed {}"
        weights = op.weights
        values = weights.quant_values.astype(np.int64) - weights.quantization.zero_point
        limit = np.amax(np.sum(np.absolute(values), axis=(0, 1, 2)))
        valid = limit <= cls.weights_limit
        return valid, f"Tensor '{weights.name}' has the sum of weights: {limit}"
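
    # Descriptive note: axis=(0, 1, 2) collapses the first three weight axes,
    # leaving one absolute-value sum per index of the final axis; the largest
    # such per-channel sum of zero-point-corrected values is what gets compared
    # against weights_limit (127 * 65536).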

    @classmethod
    @docstring_format_args([_list_formatter(supported_bias_dtypes)])
    def constraint_bias_type(cls, op):
        "Optional Bias tensor must be of type: {}"
        bias = op.bias
        if bias:
            valid = bias.dtype in cls.supported_bias_dtypes
            return valid, f"Tensor '{bias.name}' has data type: {bias.dtype}"
        return True, "Op has no bias tensor"

    @staticmethod
    def constraint_bias_40bit(op):
        "Optional Bias tensor values must fit within 40-bits"
        bias = op.bias
        if bias and bias.dtype == DataType.int64 and bias.quant_values is not None:
            valid = all(len(bin(quant_value)[2:]) <= 40 for quant_value in bias.quant_values)
            return valid, f"Tensor '{bias.name}' has values larger than 40-bits"
        return True, "Op has no bias tensor, or it fits in 40-bit"
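
    # Descriptive note: len(bin(x)[2:]) counts the binary digits of x, so the
    # check accepts bias values whose magnitude fits in 40 bits (for negative
    # values, bin() yields '-0b...', so the sliced string is one character
    # longer and the effective limit is one bit stricter).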

    @staticmethod
    def constraint_batch_size(op):
        "IFM Tensor batch size must be 1"
        ifm = op.ifm
        valid = ifm.shape[0] == 1
        return valid, f"Tensor '{ifm.name}' has batch size: {ifm.shape[0]}"

    @staticmethod
    def constraint_quant_scale_inf(op):
        "Input and Output tensors must have quantization scales that fit within float32 precision"
        if op.ofm is not None and op.ofm.is_quantized():
            ofm_scale = op.ofm.quantization.scale_f32
            if ofm_scale < np.finfo(np.float32).tiny:
                return (
                    False,
                    f"The quantization scale of the output tensor is {ofm_scale}, "
                    + f"minimum supported is: {np.finfo(np.float32).tiny}",
                )
            if op.ifm is not None and op.ifm.is_quantized():
                ifm_scale = op.ifm.quantization.scale_f32
                if np.isinf(ifm_scale / ofm_scale):
                    return (
                        False,
                        f"IFM scale divided by OFM scale is infinite, ifm_scale={ifm_scale} ofm_scale={ofm_scale}",
                    )
        return True, "Op's quantization is ok"

    @staticmethod
    def constraint_depth_multiplier(op):
        "For depth multipliers > 1, IFM channels must be 1 and OFM channels must be equal to the depth multiplier"
        depth_multiplier = op.attrs.get("depth_multiplier", 1)
        if depth_multiplier > 1:
            ifm_channels = op.ifm.shape[3]
            ofm_channels = op.ofm.shape[3]
            valid = (ifm_channels == 1) and (ofm_channels == depth_multiplier)
            extra = (
                f"Op has ifm_channels={ifm_channels}, ofm_channels={ofm_channels}"
                f" and depth_multiplier={depth_multiplier}"
            )
            return valid, extra
        return True, "Op has depth_multiplier=1"
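
    # Example: a depthwise op with depth_multiplier=4 passes only if the IFM
    # has 1 channel and the OFM has 4 channels; with depth_multiplier=1 this
    # check places no restriction on the channel counts.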

    @staticmethod
    def constraint_tconv_stride(op):
        "Stride values for both width and height must be 2"
        w = op.kernel.stride.x
        h = op.kernel.stride.y
        valid = (w == 2) and (h == 2)
        return valid, f"Op has stride WxH as: {w}x{h}"

    @staticmethod
    def constraint_tconv_same(op):
        "SAME padding: OFM dimensions must equal IFM dimensions multiplied by stride"
        if op.attrs["padding"] == Padding.SAME:
            w = op.kernel.stride.x
            h = op.kernel.stride.y
            ifm_shape = op.ifm.shape
            ofm_shape = op.ofm.shape
            valid = (ofm_shape[1] == (ifm_shape[1] * h)) and (ofm_shape[2] == (ifm_shape[2] * w))
            return valid, f"Op has ifm_shape={ifm_shape}, ofm_shape={ofm_shape} and stride WxH as {w}x{h}"
        return True, "Op has padding=VALID"

    @staticmethod
    def constraint_tconv_valid(op):
        """VALID padding: OFM dimensions must equal IFM dimensions multiplied by stride,
        minus difference between kernel size and stride"""
        if op.attrs["padding"] == Padding.VALID:
            s_w = op.kernel.stride.x
            s_h = op.kernel.stride.y
            k_w = op.kernel.width
            k_h = op.kernel.height
            ifm_shape = op.ifm.shape
            ofm_shape = op.ofm.shape
            height_check = ofm_shape[1] == (ifm_shape[1] * s_h + max(k_h - s_h, 0))
            width_check = ofm_shape[2] == (ifm_shape[2] * s_w + max(k_w - s_w, 0))
            valid = height_check and width_check
            extra = (
                f"Op has ifm_shape={ifm_shape}, ofm_shape={ofm_shape},"
                f" stride WxH as {s_w}x{s_h} and kernel WxH as {k_w}x{k_h}"
            )
            return valid, extra
        return True, "Op has padding=SAME"
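
    # Worked example: with stride 2, a 3x3 kernel and an IFM height of 4, the
    # OFM height must be 4 * 2 + max(3 - 2, 0) = 9 for VALID padding to pass.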

    @staticmethod
    def constraint_matching_in_out_types(op):
        "IFM and OFM data types must match"
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        valid = ifm_dtype == ofm_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_beta_value_range(op):
        "Beta value needs to be positive"
        beta = op.attrs.get("beta", 1.0)
        valid = beta >= 0
        return valid, f"Op has beta={beta}"

    @staticmethod
    def constraint_filter_type(op):
        "Kernel filter values for both width and height must be integer types"
        w = op.kernel.width
        h = op.kernel.height
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has kernel filter WxH as: {repr(w)}x{repr(h)}"

    @classmethod
    @docstring_format_args(filter_range)
    def constraint_filter_range(cls, op):
        "Kernel filter values for both width and height must be in the range [{}, {}]"
        if op.attrs["padding"] == Padding.SAME:
            w = op.kernel.width
            h = op.kernel.height
            filter_min, filter_max = cls.filter_range
            valid = (filter_min <= w <= filter_max) and (filter_min <= h <= filter_max)
            return valid, f"Op has kernel filter WxH as: {w}x{h}"
        return True, "Op has padding=VALID"

    @classmethod
    @docstring_format_args(filter_height_range)
    def constraint_filter_height_range(cls, op):
        "Kernel filter height must be in the range [{}, {}]"
        h = op.kernel.height
        filter_height_min, filter_height_max = cls.filter_height_range
        valid = filter_height_min <= h <= filter_height_max
        return valid, f"Op has kernel filter height as: {h}"

    @classmethod
    @docstring_format_args(filter_product_range)
    def constraint_filter_product_range(cls, op):
        "Product of kernel filter width and height must be in the range [{}, {}]"
        product = op.kernel.elements_wh()
        filter_product_min, filter_product_max = cls.filter_product_range
        valid = filter_product_min <= product <= filter_product_max
        return valid, f"Op has product of kernel filter width and height as: {product}"

    @staticmethod
    @docstring_format_args(filter_height_range)
    def constraint_filter_height_range_valid_pad(op):
        "VALID padding: Kernel filter height must be in the range [{}, {}]"
        if op.attrs["padding"] == Padding.VALID:
            return SupportedOperators.constraint_filter_height_range(op)
        return True, "Op has padding=SAME"

    @staticmethod
    @docstring_format_args(filter_product_range)
    def constraint_filter_product_range_valid_pad(op):
        "VALID padding: Product of kernel filter width and height must be in the range [{}, {}]"
        if op.attrs["padding"] == Padding.VALID:
            return SupportedOperators.constraint_filter_product_range(op)
        return True, "Op has padding=SAME"

    @staticmethod
    def constraint_resize(op):
        """The width and height of the IFM and OFM must match one of the following criteria:
        IFM W and H must both be 1
        IFM must match OFM
        OFM W and H must be 2x IFM -1, if align_corners is True
        OFM W and H must be 2x IFM, if align_corners is False"""
        # Easier to start with False condition as very few cases result in a supported resize
        valid = False
        ifm_shape = op.ifm.shape
        ofm_shape = op.ofm.shape
        align_corners = op.attrs.get("align_corners", False)
        if len(ifm_shape) == 4:
            # Valid if IFM W and H are both 1, or IFM and OFM shape are the same
            if ((ifm_shape[1] == 1) and (ifm_shape[2] == 1)) or (ifm_shape == ofm_shape):
                valid = True
            else:
                upscaled_shape = np.array(ifm_shape[1:3])
                out_shape = np.array(ofm_shape[1:3])
                while (upscaled_shape < out_shape).all():
                    upscaled_shape *= 2
                    if align_corners:
                        upscaled_shape -= 1
                    # Valid if OFM is 2x IFM (-1 for align corners)
                    if np.array_equal(out_shape, upscaled_shape):
                        valid = True
                        break
        return valid, f"Op has ifm_shape={ifm_shape}, ofm_shape={ofm_shape} and align_corners={align_corners}"
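
    # Worked example: IFM 1x4x4x8 -> OFM 1x8x8x8 is valid with
    # align_corners=False (4 doubles to 8), whereas align_corners=True would
    # instead require OFM 1x7x7x8 (4 * 2 - 1 = 7).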

    @staticmethod
    def constraint_matching_shapes(op):
        "IFM and OFM shapes must match"
        ifm_shape = op.ifm.shape
        ofm_shape = op.ofm.shape
        valid = ifm_shape == ofm_shape
        return valid, f"Op has ifm_shape={ifm_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_splitv_inferred(op):
        "Only one size is allowed to be inferred"
        sizes = op.inputs[1].values
        valid = np.count_nonzero(sizes == -1) <= 1
        return valid, f"Op has multiple inferred sizes (-1): {sizes}"

    @staticmethod
    def constraint_axis_exists(op):
        "Axis attribute must exist"
        axis = op.attrs.get("axis")
        valid = axis is not None
        return valid, f"Op has axis={axis}"

    @staticmethod
    def constraint_axis_valid(op):
        "Axis attribute must be in the range [0, <ofm_dimensions>)"
        dims = len(op.ofm.shape)
        axis = op.attrs["axis"]
        axis += dims if axis < 0 else 0
        valid = 0 <= axis < dims
        return valid, f"Op has ofm_dimensions={dims} and axis attribute is: {axis}"
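
    # Example: for a 4D OFM, axis=-1 is normalised to 3 and accepted, while
    # axis=4 is left as 4 and rejected (the valid range is [0, 4)).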

    @staticmethod
    def constraint_matching_dimensionality(op):
        "All Input dimensionalities must match OFM dimensionality"
        valid = True
        extra = []
        ofm_dim = len(op.ofm.shape)
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            dim = len(tens.shape)
            if dim != ofm_dim:
                valid = False
                extra.append(f"Tensor '{tens.name}' has dimension: {dim}")
        extra = ", ".join(extra)
        return valid, f"Op has ofm_dimension={ofm_dim} and the list of mismatching inputs are: {extra}"

    @staticmethod
    def constraint_valid_dimensions(op):
        "All Input dimensions must match OFM dimension in all axes except the one defined by the axis attribute"
        valid = True
        extra = []
        ofm_shape = op.ofm.shape
        ofm_dim = len(ofm_shape)
        axis = op.attrs["axis"]
        axis += ofm_dim if axis < 0 else 0
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if any(tens.shape[dim] != ofm_shape[dim] for dim in range(ofm_dim) if dim != axis):
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        extra = ", ".join(extra)
        return valid, f"Op has axis={axis}, ofm_shape={ofm_shape} and the list of mismatching inputs are: {extra}"

    @staticmethod
    def constraint_stridedslice_input_count(op):
        "Exactly 4 Input tensors are required"
        inputs = len(op.inputs)
        valid = inputs == 4
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_pad_input_count(op):
        "Number of input tensors must be exactly 2"
        inputs = len(op.inputs)
        valid = inputs == 2
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_pad_shape(op):
        "The padding tensor must have the shape [4,2]"
        valid = op.inputs[1].shape == [4, 2]
        return valid, f"The pad tensor has the shape: {op.inputs[1].shape}"

    @classmethod
    @docstring_format_args([_list_formatter(supported_pad_dtypes)])
    def constraint_pad_type(cls, op):
        "Pad tensor must be of type: {}"
        pad_tensor = op.inputs[1]
        valid = pad_tensor.dtype in cls.supported_pad_dtypes
        return valid, f"Tensor '{pad_tensor.name}' has data type: {pad_tensor.dtype}"

    @staticmethod
    def constraint_padding_dimensions(op):
        "The pad tensor can only pad width and height"
        pad_tensor = op.inputs[1].values
        valid = sum(pad_tensor[0, :]) + sum(pad_tensor[-1, :]) == 0
        return valid, f"First dimension padding: {pad_tensor[0,:]}, last dimension padding: {pad_tensor[-1,:]}"

    @staticmethod
    def constraint_pad_constant(op):
        "The padding tensor must be constant"
        pad_tensor = op.inputs[1].values
        valid = pad_tensor is not None
        return valid, f"Op has non-constant padding tensor: {op.inputs[1].values}"

    @classmethod
    @docstring_format_args([_optype_formatter(supported_pad_consumers)])
    def constraint_pad_ofm(cls, op):
        "Must be followed by one of the following operator types: {}"
        consumers = op.ofm.consumers()
        unsupported_consumers = [
            cons.type
            for cons in consumers
            if cons is not None
            if cons.type not in cls.supported_pad_consumers or cons.attrs["padding"] != Padding.VALID
        ] + [None for cons in consumers if cons is None]
        none_string = ", ".join(["NoneType" for cons in consumers if cons is None])
        valid = len(unsupported_consumers) == 0
        return valid, f"PAD operator is followed by: {_optype_formatter(unsupported_consumers)+none_string}"
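
    # Descriptive note: a consumer only qualifies if it is a convolution or
    # depthwise convolution that itself uses VALID padding; a missing consumer
    # (None, i.e. the PAD output leaves the graph) also fails the check and is
    # reported as NoneType.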

    @staticmethod
    def constraint_stridedslice_inputs_const(op):
        "Begin, End and Stride Input tensors must be constant"
        valid = True
        extra = []
        _, begin, end, strides = op.inputs
        if begin.values is None:
            valid = False
            extra.append(f"Begin tensor '{begin.name}'")
        if end.values is None:
            valid = False
            extra.append(f"End tensor '{end.name}'")
        if strides.values is None:
            valid = False
            extra.append(f"Stride tensor '{strides.name}'")
        extra = ", ".join(extra)
        return valid, f"Op has non-constant tensors: {extra}"

    @staticmethod
    def constraint_stridedslice_stride_values(op):
        "All Strides values must be 1"
        strides = op.inputs[3]
        valid = all(stride == 1 for stride in strides.values)
        return valid, f"Op has strides values {strides.values}"

    @staticmethod
    def constraint_ellipsis_mask(op):
        "ellipsis_mask must be 0"
        ellipsis = op.attrs["ellipsis_mask"]
        valid = ellipsis == 0
        return valid, f"Op has ellipsis mask as: {ellipsis}"

    @staticmethod
    def constraint_axis_masks(op):
        "new_axis_mask and shrink_axis_mask cannot both be set"
        new_axis = op.attrs["new_axis_mask"]
        shrink_axis = op.attrs["shrink_axis_mask"]
        valid = (new_axis == 0) or (shrink_axis == 0)
        return valid, f"Op has new_axis_mask={new_axis} and shrink_axis_mask={shrink_axis}"

    @staticmethod
    def constraint_slice_ranges(op):
        "Slice 'end' values must be greater than 'begin' values"
        ifm, begin, end, _ = op.inputs
        # Calculate offset begin/end
        offset_begin = get_slice_offsets(ifm.shape, begin, op.attrs["begin_mask"], is_begin=True)
        offset_end = get_slice_offsets(ifm.shape, end, op.attrs["end_mask"], is_begin=False)
        # Check "end - begin" doesn't result in any zero or negative elements
        valid = all((e - b) > 0 for b, e in zip(offset_begin, offset_end))
        return valid, f"Op has begin_values={begin.values} and end_values={end.values}"

    @staticmethod
    def constraint_matching_inputs_types(op):
        "Both Input data types must match"
        ifm_dtype = op.ifm.dtype
        ifm2_dtype = op.ifm2.dtype
        valid = ifm_dtype == ifm2_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ifm2_dtype={ifm2_dtype}"

    @staticmethod
    def constraint_matching_signed(op):
        "For IFM that are signed, OFM must also be signed"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        if ifm_dtype.type & BaseType.Signed:
            valid = bool(ofm_dtype.type & BaseType.Signed)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_unsigned_valid(op):
        "For IFM that are unsigned, OFM must either be the same type or int32"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        if ifm_dtype.type & BaseType.Unsigned:
            valid = (ifm_dtype == ofm_dtype) or (ofm_dtype == DataType.int32)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_inputs_int32(op):
        "Both Input data types must be int32"
        ifm_dtype = op.ifm.dtype
        ifm2_dtype = op.ifm2.dtype
        valid = (ifm_dtype == DataType.int32) and (ifm2_dtype == DataType.int32)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ifm2_dtype={ifm2_dtype}"

    @staticmethod
    def constraint_output_int32(op):
        "OFM must be int32"
        ofm_dtype = op.ofm.dtype
        valid = ofm_dtype == DataType.int32
        return valid, f"Op has ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_matching_quantization_parameters(op):
        "Both Input quantization parameters must match OFM quantization parameters"
        valid = True
        extra = []
        if not check_quantized_tens_scaling_equal(op.ofm, op.ifm):
            valid = False
            extra.append(op.ifm.name)
        if op.ifm2 is not None and not check_quantized_tens_scaling_equal(op.ofm, op.ifm2):
            valid = False
            extra.append(op.ifm2.name)
        extra = ", ".join(extra)
        return valid, f"Op has tensors with different quantization parameters to the OFM '{op.ofm.name}': {extra}"

    @staticmethod
    def constraint_elemwise_batch_size(op):
        "Batch size must be 1 for Input tensors with more than 2 dimensions"
        valid = True
        extra = []
        for tens in (op.ifm, op.ifm2):
            # Unary ops have ifm2 as None
            if tens is not None:
                if (len(tens.shape) > 2) and (tens.shape[0] != 1):
                    valid = False
                    extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has invalid input tensors: {extra}"

    @staticmethod
    def constraint_matching_either_shapes(op):
        "At least one Input's shape must match the OFM's shape"
        ifm_shape = op.ifm.shape
        ifm2_shape = op.ifm2.shape if op.ifm2 else None
        ofm_shape = op.ofm.shape
        valid = (ifm_shape == ofm_shape) or (ifm2_shape == ofm_shape)
        return valid, f"Op has ifm_shape={ifm_shape}, ifm2_shape={ifm2_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_broadcast_shapes(op):
        "Broadcasting is only allowed for rank indices with dimension 1, from either IFM1 or IFM2"
        ifm_shape = op.ifm.shape
        ifm2_shape = op.ifm2.shape if op.ifm2 else None
        ofm_shape = op.ofm.shape
        valid = True
        if ifm_shape is not None and ifm2_shape is not None:
            # align trailing dimensions
            size = min(len(ifm_shape), len(ifm2_shape))
            for i, i2, o in zip(ifm_shape[-size:], ifm2_shape[-size:], ofm_shape[-size:]):
                mi = max(i, i2)
                # Input dimensions should match or one should be of dimension 1
                # Output dimension should match the largest input dimension, together
                # with constraint_match_either_shapes ensures broadcast from only one input
                if not (i == i2 or i == 1 or i2 == 1) or o != mi:
                    valid = False
                    break

        return valid, f"Op has ifm_shape={ifm_shape} and ifm2_shape={ifm2_shape}"
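
    # Example: ifm_shape=[1, 4, 4, 8] with ifm2_shape=[1, 1, 1, 8] passes when
    # ofm_shape=[1, 4, 4, 8]; pairing [1, 4, 4, 8] with [1, 2, 2, 8] fails
    # because 4 vs 2 is neither a match nor a broadcast from dimension 1.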

    @staticmethod
    def constraint_alpha_valid(op):
        "Alpha must not be negative"
        alpha = op.attrs["alpha"]
        valid = alpha >= 0
        return valid, f"Op has alpha={alpha}"
999 return valid, f"Op has alpha={alpha}"