# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The SupportedOperators class, which is a collection of all supported operators and their parameter checks.
from collections import defaultdict

import numpy as np

from .data_type import BaseType
from .data_type import DataType
from .numeric_util import is_integer
from .operation import get_slice_offsets
from .operation import Op
from .tensor import check_quantized_tens_scaling_equal
from .tflite_mapping import BUILTIN_OPERATOR_UNKNOWN
from .tflite_mapping import optype_to_builtintype


# Custom decorator function to allow formatting docstrings containing "{}"
def docstring_format_args(args):
    def docstring(func):
        func.__doc__ = func.__doc__.format(*args)
        return func

    return docstring

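# A minimal sketch of how the decorator above behaves (hypothetical function and
# values, shown as comments so the module is unchanged): the "{}" placeholders in
# a constraint's docstring are filled in once, at definition time.
#
#   @docstring_format_args([1, 8])
#   def check(op):
#       "Value must be in the range [{}, {}]"
#
#   assert check.__doc__ == "Value must be in the range [1, 8]"
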

def _list_formatter(arg):
    # Order and join into a string representation
    return ", ".join(sorted(map(str, arg)))


def _optype_formatter(op_list):
    # Convert internal op types to external names
    output = map(optype_to_builtintype, op_list)
    # Remove UNKNOWNs
    output = (x for x in output if x is not BUILTIN_OPERATOR_UNKNOWN)
    return _list_formatter(output)

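# Sketch of the two helpers above on hypothetical inputs: _list_formatter sorts
# and joins any iterable of printable items, and _optype_formatter additionally
# maps internal Op values to their external TFLite names, dropping unknowns.
#
#   _list_formatter({"MUL", "ADD"})      -> "ADD, MUL"
#   _optype_formatter([Op.Add, Op.Mul])  -> e.g. "ADD, MUL"
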

class SupportedOperators:
    # Categorised lists of supported operators
    npu_pre_ops = set((Op.SplitSliceRead,))
    convolution_ops = set((Op.Conv2DBias, Op.Conv2D, Op.QuantizedConv2D,))
    depthwise_convolution_ops = set((Op.DepthwiseConv2DBias,))
    transpose_convolution_ops = set((Op.Conv2DBackpropInput,))
    convolution_like_ops = convolution_ops | depthwise_convolution_ops | transpose_convolution_ops
    max_pooling_ops = Op.op_set(Op.is_maxpool_op)
    avg_pooling_ops = Op.op_set(Op.is_avgpool_op)
    pooling_ops = set((Op.ReduceSum,)) | max_pooling_ops | avg_pooling_ops
    resizing_ops = set((Op.ResizeBilinear,))
    fc_vector_products = set((Op.QuantizedMatMul, Op.MatMul, Op.FullyConnected,))
    mac_main_ops = (
        # RNN/LSTM/GRU
        set((Op.BlockLSTM,))
        # conv/depthwiseconv/transposeconv
        | convolution_like_ops
        # pooling
        | pooling_ops
        # resizing/upscaling
        | resizing_ops
        # FC layers
        | fc_vector_products
    )
    unary_elem_wise_main_ops = Op.op_set(Op.is_unary_elementwise_op)
    binary_elem_wise_min_max_ops = set((Op.Minimum, Op.Maximum,))
    binary_elem_wise_shift_ops = set((Op.SHL, Op.SHR,))
    binary_elem_wise_add_mul_sub = set((Op.Add, Op.Mul, Op.Sub,))
    binary_elem_wise_main_ops = binary_elem_wise_min_max_ops | binary_elem_wise_add_mul_sub | binary_elem_wise_shift_ops
    elem_wise_main_ops = binary_elem_wise_main_ops | unary_elem_wise_main_ops
    supported_int32_tensor_ops = (
        set((Op.ReduceSum, Op.CLZ,)) | binary_elem_wise_add_mul_sub | binary_elem_wise_shift_ops
    )
    relu_ops = Op.op_set(Op.is_relu_op)
    activation_ops = relu_ops | set((Op.Tanh, Op.Sigmoid, Op.Softmax,))
    npu_post_ops = (
        # activation functions
        activation_ops
        # concatenation write direction
        | set((Op.ConcatSliceWrite,))
        # Quantization
        | set((Op.Quantize,))
    )
    split_ops = set((Op.Split, Op.SplitV, Op.StridedSlice, Op.Slice, Op.UnpackReshaped, Op.Unpack,))
    concat_ops = set((Op.Concat, Op.ConcatTFLite, Op.PackReshaped, Op.Pack,))
    memory_only_ops = set((Op.Squeeze, Op.Reshape, Op.QuantizedReshape,)) | concat_ops | split_ops
    shapeless_input_ops = binary_elem_wise_main_ops | set((Op.Split, Op.SplitV,))
    per_axis_quant_ops = convolution_like_ops  # per-axis/channel quantization only currently supported for conv ops
    supported_fused_activations = relu_ops | set((Op.Tanh, Op.Sigmoid, Op.LUT,))
    supported_operators = npu_pre_ops | mac_main_ops | elem_wise_main_ops | npu_post_ops | memory_only_ops
    # Supported data types
    supported_op_dtypes = set((DataType.uint8, DataType.int8, DataType.int16, DataType.int32))
    supported_bias_dtypes = set((DataType.int32, DataType.int64))
    # Defined ranges for allowed values:
    tens_dim_range = (1, 65535)
    stride_range = (1, 3)
    dilation_range = (1, 2)
    dilated_height_range = (1, 64)
    dilated_product_range = (1, 64 * 64)
    weights_limit = 127 * 65536
    filter_range = (1, 8)
    filter_height_range = (1, 256)
    filter_product_range = (1, 256 * 256)

    def __init__(self):
        # Setup the generic constraints. Note: the order matters
        self.generic_constraints = []
        self.generic_constraints.append(SupportedOperators.constraint_tens_no_dynamic)
        self.generic_constraints.append(SupportedOperators.constraint_tens_defined_shape)
        self.generic_constraints.append(SupportedOperators.constraint_tens_output_scalar)
        self.generic_constraints.append(SupportedOperators.constraint_tens_input_scalar)
        self.generic_constraints.append(SupportedOperators.constraint_tens_shape_size)
        self.generic_constraints.append(SupportedOperators.constraint_tens_dtype)
        self.generic_constraints.append(SupportedOperators.constraint_tens_int32_ops)
        self.generic_constraints.append(SupportedOperators.constraint_tens_dimension)
        self.generic_constraints.append(SupportedOperators.constraint_tens_quant_none_check)
        self.generic_constraints.append(SupportedOperators.constraint_tens_quant_scale)
        self.generic_constraints.append(SupportedOperators.constraint_tens_quant_per_axis)
        self.generic_constraints.append(SupportedOperators.constraint_faf)

        # Setup specific constraints. Note: the order matters
        self.specific_constraints = defaultdict(list)

        # Conv-like checks:
        for op_type in SupportedOperators.convolution_like_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_stride_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_stride_range)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_dilation_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_dilation_range)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_dilated_height_range)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_dilated_product_range)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_weights_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_weights_const)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_weights_limit)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_bias_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_bias_40bit)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_batch_size)
        # Depthwise Conv specific checks:
        for op_type in SupportedOperators.depthwise_convolution_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_depth_multiplier)
        # Transpose Conv specific checks:
        for op_type in SupportedOperators.transpose_convolution_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_tconv_stride)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_tconv_same)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_tconv_valid)

        # Pooling checks:
        for op_type in SupportedOperators.pooling_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_batch_size)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_stride_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_stride_range)
        # AVG pooling specific checks:
        for op_type in SupportedOperators.avg_pooling_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_range)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_height_range_valid_pad)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_product_range_valid_pad)
        # MAX pooling specific checks:
        for op_type in SupportedOperators.max_pooling_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_height_range)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_filter_product_range)
        # TODO: Check ReduceSum restrictions

        # Relu specific checks:
        for op_type in SupportedOperators.relu_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_quant_scale_inf)

        # Resizing specific checks:
        for op_type in SupportedOperators.resizing_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_resize)

        # Vector Product specific checks:
        for op_type in SupportedOperators.fc_vector_products:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_weights_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_weights_const)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_bias_type)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_bias_40bit)

        # Concat specific checks:
        for op_type in (Op.Concat, Op.ConcatTFLite):
            self.specific_constraints[op_type].append(SupportedOperators.constraint_axis_exists)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_axis_valid)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_dimensionality)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_valid_dimensions)

        # Element-wise checks:
        for op_type in SupportedOperators.elem_wise_main_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_elemwise_batch_size)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_either_shapes)
        # Unary specific checks:
        for op_type in SupportedOperators.unary_elem_wise_main_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_in_out_types)
        # Binary Min/Max specific checks:
        for op_type in SupportedOperators.binary_elem_wise_min_max_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_in_out_types)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_quantization_parameters)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_broadcast_shapes)
        # Binary Add/Mul/Sub specific checks:
        for op_type in SupportedOperators.binary_elem_wise_add_mul_sub:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_inputs_types)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_matching_signed)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_unsigned_valid)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_broadcast_shapes)
        # Binary Shift specific checks:
        for op_type in SupportedOperators.binary_elem_wise_shift_ops:
            self.specific_constraints[op_type].append(SupportedOperators.constraint_inputs_int32)
            self.specific_constraints[op_type].append(SupportedOperators.constraint_broadcast_shapes)

        # SHL specific checks:
        self.specific_constraints[Op.SHL].append(SupportedOperators.constraint_output_int32)

        # CLZ specific checks:
        self.specific_constraints[Op.CLZ].append(SupportedOperators.constraint_output_int32)

        # Softmax specific checks:
        self.specific_constraints[Op.Softmax].append(SupportedOperators.constraint_matching_shapes)
        self.specific_constraints[Op.Softmax].append(SupportedOperators.constraint_matching_in_out_types)
        self.specific_constraints[Op.Softmax].append(SupportedOperators.constraint_beta_value_range)

        # SplitV specific checks:
        self.specific_constraints[Op.SplitV].append(SupportedOperators.constraint_splitv_inferred)

        # StridedSlice specific checks:
        self.specific_constraints[Op.StridedSlice].append(SupportedOperators.constraint_stridedslice_input_count)
        self.specific_constraints[Op.StridedSlice].append(SupportedOperators.constraint_stridedslice_inputs_const)
        self.specific_constraints[Op.StridedSlice].append(SupportedOperators.constraint_stridedslice_stride_values)
        self.specific_constraints[Op.StridedSlice].append(SupportedOperators.constraint_ellipsis_mask)
        self.specific_constraints[Op.StridedSlice].append(SupportedOperators.constraint_axis_masks)
        self.specific_constraints[Op.StridedSlice].append(SupportedOperators.constraint_slice_ranges)

        # LeakyRelu specific checks:
        self.specific_constraints[Op.LeakyRelu].append(SupportedOperators.constraint_alpha_valid)

    def is_operator_supported(self, op):
        ext_type = optype_to_builtintype(op.type)
        if op.type not in SupportedOperators.supported_operators:
            if op.type not in (Op.Placeholder, Op.SubgraphInput, Op.Const):
                print(f"Info: {ext_type} '{op.name}' is a CPU only op")
            return False

        for constraint in self.generic_constraints + self.specific_constraints[op.type]:
            valid, extra = constraint(op)
            if not valid:
                print(f"Warning: {ext_type} '{op.name}' is not supported on the NPU. Placing on CPU instead")
                print(f" - {constraint.__doc__}")
                if extra:
                    print(f"   {extra}")
                return False

        return True

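    # Sketch of how this checker is typically driven by the pass that splits a
    # graph between NPU and CPU (hypothetical driver code, shown as comments):
    #
    #   supported = SupportedOperators()
    #   for op in sg.get_all_ops():
    #       op.run_on_npu = supported.is_operator_supported(op)
    #
    # Each constraint below returns a (valid, extra) tuple: the docstring is the
    # user-facing rule and `extra` carries the offending values for diagnostics.
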
    @staticmethod
    def constraint_tens_no_dynamic(op):
        "Input(s) and Output tensors must not be dynamic"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (tens.values is None):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has dynamic tensor(s): {extra}"

    @staticmethod
    def constraint_tens_defined_shape(op):
        "Input(s) and Output tensors must have a defined shape"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if not tens.has_fully_defined_shape():
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_output_scalar(op):
        "Output tensors cannot be scalar"
        ofm = op.ofm
        valid = ofm.shape != []
        return valid, f"Output Tensor '{ofm.name}' is scalar"

    @classmethod
    @docstring_format_args([_optype_formatter(shapeless_input_ops)])
    def constraint_tens_input_scalar(cls, op):
        "Scalar Input tensors are only valid for op type: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if (tens.shape == []) and (op.type not in cls.shapeless_input_ops):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has scalar input tensor(s): {extra}"

    @staticmethod
    def constraint_tens_shape_size(op):
        "Input(s) and Output tensors must not be greater than 4D"
        valid = True
        extra = []
        tensors = [tens for tens in op.inputs + op.outputs if tens]
        for tens in tensors:
            if len(tens.shape) > 4:
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @classmethod
    @docstring_format_args([_list_formatter(supported_op_dtypes)])
    def constraint_tens_dtype(cls, op):
        "Tensors must be of type: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        if not tensors:
            tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if tens.dtype not in cls.supported_op_dtypes:
                valid = False
                extra.append(f"Tensor '{tens.name}' has data type: {tens.dtype}")
        return valid, ", ".join(extra)

    @classmethod
    @docstring_format_args([_optype_formatter(supported_int32_tensor_ops)])
    def constraint_tens_int32_ops(cls, op):
        "Tensors which are int32 are only valid when op type is: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        if not tensors:
            tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if (tens.dtype == DataType.int32) and (op.type not in cls.supported_int32_tensor_ops):
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has int32 tensor(s): {extra}"

    @classmethod
    @docstring_format_args(tens_dim_range)
    def constraint_tens_dimension(cls, op):
        "Tensor dimensions must be in the range [{}, {}]"
        tens_min, tens_max = cls.tens_dim_range
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        if not tensors:
            tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if not all(tens_min <= dim <= tens_max for dim in tens.shape):
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        return valid, ", ".join(extra)

    @staticmethod
    def constraint_tens_quant_none_check(op):
        "Input(s), Output and Weight tensors must have quantization parameters"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if tens.quantization is None:
                valid = False
                extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has tensors with missing quantization parameters: {extra}"

    @staticmethod
    def constraint_tens_quant_scale(op):
        "Input(s), Output and Weight tensors with quantization scales must be finite"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if (tens.quantization.scale_f32 is not None) and np.isinf(tens.quantization.scale_f32).any():
                valid = False
                extra.append(f"Tensor '{tens.name}' has quantization scale: {tens.quantization.scale_f32}")
        return valid, ", ".join(extra)

    @classmethod
    @docstring_format_args([_optype_formatter(per_axis_quant_ops)])
    def constraint_tens_quant_per_axis(cls, op):
        "Per-axis quantization is only supported for the following op types: {}"
        valid = True
        extra = []
        if op.type not in cls.per_axis_quant_ops:
            tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
            for tens in tensors:
                if tens.quantization.is_per_axis():
                    valid = False
                    extra.append(tens.name)
        return valid, "The following tensor(s) have per-axis quantization parameters: " + ", ".join(extra)

    @classmethod
    @docstring_format_args([_optype_formatter(supported_fused_activations)])
    def constraint_faf(cls, op):
        "The fused activation function (if present) must be one of type: {}"
        if op.activation is None:
            res = True, "Op has no fused activation function"
        else:
            faf = op.activation.op_type
            valid = faf in cls.supported_fused_activations
            res = valid, f"Op has its fused activation function as: {faf}"
        return res

    @staticmethod
    def constraint_stride_type(op):
        "Stride values for both width and height must be integer types"
        w, h = op.get_kernel_stride()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has stride WxH as: {repr(w)}x{repr(h)}"

    @classmethod
    @docstring_format_args(stride_range)
    def constraint_stride_range(cls, op):
        "Stride values for both width and height must be in the range [{}, {}]"
        w, h = op.get_kernel_stride()
        stride_min, stride_max = cls.stride_range
        valid = (stride_min <= w <= stride_max) and (stride_min <= h <= stride_max)
        return valid, f"Op has stride WxH as: {w}x{h}"

    @staticmethod
    def constraint_dilation_type(op):
        "Dilation factor values for both width and height must be integer types"
        w, h = op.get_kernel_dilation()
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has dilation factor WxH as: {repr(w)}x{repr(h)}"

    @classmethod
    @docstring_format_args(dilation_range)
    def constraint_dilation_range(cls, op):
        "Dilation factor values for both width and height must be in the range [{}, {}]"
        w, h = op.get_kernel_dilation()
        dilation_min, dilation_max = cls.dilation_range
        valid = (dilation_min <= w <= dilation_max) and (dilation_min <= h <= dilation_max)
        return valid, f"Op has dilation factor WxH as: {w}x{h}"

    @classmethod
    @docstring_format_args(dilated_height_range)
    def constraint_dilated_height_range(cls, op):
        "Dilated kernel height must be in the range [{}, {}]"
        h = op.kernel.area_height()
        dilated_height_min, dilated_height_max = cls.dilated_height_range
        valid = dilated_height_min <= h <= dilated_height_max
        return valid, f"Op has dilated kernel height as: {h}"

    @classmethod
    @docstring_format_args(dilated_product_range)
    def constraint_dilated_product_range(cls, op):
        "Product of dilated kernel width and height must be in the range [{}, {}]"
        product = op.kernel.area_width() * op.kernel.area_height()
        dilated_product_min, dilated_product_max = cls.dilated_product_range
        valid = dilated_product_min <= product <= dilated_product_max
        return valid, f"Op has product of dilated kernel width and height as: {product}"

    @staticmethod
    def constraint_weights_type(op):
        "Weight tensor must be 8-bit"
        weights = op.weights
        valid = weights.element_size() == 1
        return valid, f"Tensor '{weights.name}' is {int(weights.element_size() * 8)}-bit"

    @staticmethod
    def constraint_weights_const(op):
        "Weight tensor must be constant"
        weights = op.weights
        valid = weights.values is not None
        return valid, f"Tensor '{weights.name}' has non-constant values"

    @classmethod
    @docstring_format_args([weights_limit])
    def constraint_weights_limit(cls, op):
        "The sum of the weights cannot exceed {}"
        weights = op.weights
        values = weights.quant_values.astype(np.int64) - weights.quantization.zero_point
        limit = np.amax(np.sum(np.absolute(values), axis=(0, 1, 2)))
        valid = limit <= cls.weights_limit
        return valid, f"Tensor '{weights.name}' has the sum of weights: {limit}"

    @classmethod
    @docstring_format_args([_list_formatter(supported_bias_dtypes)])
    def constraint_bias_type(cls, op):
        "Optional Bias tensor must be of type: {}"
        bias = op.bias
        if bias:
            valid = bias.dtype in cls.supported_bias_dtypes
            return valid, f"Tensor '{bias.name}' has data type: {bias.dtype}"
        return True, "Op has no bias tensor"

    @staticmethod
    def constraint_bias_40bit(op):
        "Optional Bias tensor values must fit within 40-bits"
        bias = op.bias
        if bias and bias.dtype == DataType.int64 and bias.quant_values is not None:
            valid = all(len(bin(quant_value)[2:]) <= 40 for quant_value in bias.quant_values)
            return valid, f"Tensor '{bias.name}' has values larger than 40-bits"
        return True, "Op has no bias tensor, or it fits in 40-bit"

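    # Worked example for the 40-bit check above (values hypothetical): for a
    # non-negative bias value v, bin(v)[2:] strips the "0b" prefix, so
    # len(bin(2 ** 39)[2:]) == 40 passes while len(bin(2 ** 40)[2:]) == 41 fails.
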
    @staticmethod
    def constraint_batch_size(op):
        "IFM Tensor batch size must be 1"
        ifm = op.ifm
        valid = ifm.shape[0] == 1
        return valid, f"Tensor '{ifm.name}' has batch size: {ifm.shape[0]}"

    @staticmethod
    def constraint_quant_scale_inf(op):
        "The IFM quantization scale divided by the OFM quantization scale must not be infinite"
        ifm_scale = op.ifm.quantization.scale_f32
        ofm_scale = op.ofm.quantization.scale_f32
        valid = not np.isinf(ifm_scale / ofm_scale)
        return valid, f"Op has infinite quantization scale. ifm_scale={ifm_scale} ofm_scale={ofm_scale}"

    @staticmethod
    def constraint_depth_multiplier(op):
        "For depth multipliers > 1, IFM channels must be 1 and OFM channels must be equal to the depth multiplier"
        depth_multiplier = op.attrs.get("depth_multiplier", 1)
        if depth_multiplier > 1:
            ifm_channels = op.ifm.shape[3]
            ofm_channels = op.ofm.shape[3]
            valid = (ifm_channels == 1) and (ofm_channels == depth_multiplier)
            extra = (
                f"Op has ifm_channels={ifm_channels}, ofm_channels={ofm_channels}"
                f" and depth_multiplier={depth_multiplier}"
            )
            return valid, extra
        return True, "Op has depth_multiplier=1"

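    # Example of the rule above (hypothetical shapes): a depthwise conv with
    # depth_multiplier=4 is accepted only when ifm.shape = [1, H, W, 1] and
    # ofm.shape = [1, H', W', 4]; an IFM with 3 channels and multiplier 4 is
    # placed on the CPU.
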
    @staticmethod
    def constraint_tconv_stride(op):
        "Stride values for both width and height must be 2"
        w = op.kernel.stride.x
        h = op.kernel.stride.y
        valid = (w == 2) and (h == 2)
        return valid, f"Op has stride WxH as: {w}x{h}"

    @staticmethod
    def constraint_tconv_same(op):
        "SAME padding: OFM dimensions must equal IFM dimensions multiplied by stride"
        if op.attrs["padding"] == b"SAME":
            w = op.kernel.stride.x
            h = op.kernel.stride.y
            ifm_shape = op.ifm.shape
            ofm_shape = op.ofm.shape
            valid = (ofm_shape[1] == (ifm_shape[1] * h)) and (ofm_shape[2] == (ifm_shape[2] * w))
            return valid, f"Op has ifm_shape={ifm_shape}, ofm_shape={ofm_shape} and stride WxH as {w}x{h}"
        return True, "Op has padding=VALID"

    @staticmethod
    def constraint_tconv_valid(op):
        """VALID padding: OFM dimensions must equal IFM dimensions multiplied by stride,
        plus the difference between kernel size and stride"""
        if op.attrs["padding"] == b"VALID":
            s_w = op.kernel.stride.x
            s_h = op.kernel.stride.y
            k_w = op.kernel.width
            k_h = op.kernel.height
            ifm_shape = op.ifm.shape
            ofm_shape = op.ofm.shape
            height_check = ofm_shape[1] == (ifm_shape[1] * s_h + max(k_h - s_h, 0))
            width_check = ofm_shape[2] == (ifm_shape[2] * s_w + max(k_w - s_w, 0))
            valid = height_check and width_check
            extra = (
                f"Op has ifm_shape={ifm_shape}, ofm_shape={ofm_shape},"
                f" stride WxH as {s_w}x{s_h} and kernel WxH as {k_w}x{k_h}"
            )
            return valid, extra
        return True, "Op has padding=SAME"

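    # Worked example of the VALID-padding rule above (hypothetical values):
    # with ifm height 4, stride 2 and kernel height 3, the expected ofm height
    # is 4 * 2 + max(3 - 2, 0) = 9; any other ofm height fails the check.
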
    @staticmethod
    def constraint_matching_in_out_types(op):
        "IFM and OFM data types must match"
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        valid = ifm_dtype == ofm_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_beta_value_range(op):
        "Beta value needs to be non-negative"
        beta = op.attrs.get("beta", 1.0)
        valid = beta >= 0
        return valid, f"Op has beta={beta}"

    @staticmethod
    def constraint_filter_type(op):
        "Kernel filter values for both width and height must be integer types"
        w = op.kernel.width
        h = op.kernel.height
        valid = is_integer(w) and is_integer(h)
        return valid, f"Op has kernel filter WxH as: {repr(w)}x{repr(h)}"

    @classmethod
    @docstring_format_args(filter_range)
    def constraint_filter_range(cls, op):
        "Kernel filter values for both width and height must be in the range [{}, {}]"
        if op.attrs["padding"] == b"SAME":
            w = op.kernel.width
            h = op.kernel.height
            filter_min, filter_max = cls.filter_range
            valid = (filter_min <= w <= filter_max) and (filter_min <= h <= filter_max)
            return valid, f"Op has kernel filter WxH as: {w}x{h}"
        return True, "Op has padding=VALID"

    @classmethod
    @docstring_format_args(filter_height_range)
    def constraint_filter_height_range(cls, op):
        "Kernel filter height must be in the range [{}, {}]"
        h = op.kernel.height
        filter_height_min, filter_height_max = cls.filter_height_range
        valid = filter_height_min <= h <= filter_height_max
        return valid, f"Op has kernel filter height as: {h}"

    @classmethod
    @docstring_format_args(filter_product_range)
    def constraint_filter_product_range(cls, op):
        "Product of kernel filter width and height must be in the range [{}, {}]"
        product = op.kernel.elements_wh()
        filter_product_min, filter_product_max = cls.filter_product_range
        valid = filter_product_min <= product <= filter_product_max
        return valid, f"Op has product of kernel filter width and height as: {product}"

    @staticmethod
    @docstring_format_args(filter_height_range)
    def constraint_filter_height_range_valid_pad(op):
        "VALID padding: Kernel filter height must be in the range [{}, {}]"
        if op.attrs["padding"] == b"VALID":
            return SupportedOperators.constraint_filter_height_range(op)
        return True, "Op has padding=SAME"

    @staticmethod
    @docstring_format_args(filter_product_range)
    def constraint_filter_product_range_valid_pad(op):
        "VALID padding: Product of kernel filter width and height must be in the range [{}, {}]"
        if op.attrs["padding"] == b"VALID":
            return SupportedOperators.constraint_filter_product_range(op)
        return True, "Op has padding=SAME"

    @staticmethod
    def constraint_resize(op):
        """The width and height of the IFM and OFM must match one of the following criteria:
        IFM W and H must both be 1
        IFM must match OFM
        OFM W and H must be 2x IFM -1, if align_corners is True
        OFM W and H must be 2x IFM, if align_corners is False"""
        # Easier to start with False condition as very few cases result in a supported resize
        valid = False
        ifm_shape = op.ifm.shape
        ofm_shape = op.ofm.shape
        align_corners = op.attrs.get("align_corners", False)
        if len(ifm_shape) == 4:
            # Valid if IFM W and H are both 1, or IFM and OFM shape are the same
            if ((ifm_shape[1] == 1) and (ifm_shape[2] == 1)) or (ifm_shape == ofm_shape):
                valid = True
            else:
                upscaled_shape = np.array(ifm_shape[1:3])
                out_shape = np.array(ofm_shape[1:3])
                while (upscaled_shape < out_shape).all():
                    upscaled_shape *= 2
                    if align_corners:
                        upscaled_shape -= 1
                    # Valid if OFM is 2x IFM (-1 for align corners)
                    if np.array_equal(out_shape, upscaled_shape):
                        valid = True
                        break
        return valid, f"Op has ifm_shape={ifm_shape}, ofm_shape={ofm_shape} and align_corners={align_corners}"

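    # Worked example of the resize rule above (hypothetical shapes): upscaling
    # ifm_shape=[1, 8, 8, C] to ofm_shape=[1, 16, 16, C] with align_corners=False
    # is accepted (16 = 2 * 8), as is [1, 15, 15, C] with align_corners=True
    # (15 = 2 * 8 - 1); an arbitrary target such as [1, 13, 13, C] is not.
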
    @staticmethod
    def constraint_matching_shapes(op):
        "IFM and OFM shapes must match"
        ifm_shape = op.ifm.shape
        ofm_shape = op.ofm.shape
        valid = ifm_shape == ofm_shape
        return valid, f"Op has ifm_shape={ifm_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_splitv_inferred(op):
        "Only one size is allowed to be inferred"
        sizes = op.ifm2.values
        valid = np.count_nonzero(sizes == -1) <= 1
        return valid, f"Op has multiple inferred sizes (-1): {sizes}"

    @staticmethod
    def constraint_axis_exists(op):
        "Axis attribute must exist"
        axis = op.attrs.get("axis")
        valid = axis is not None
        return valid, f"Op has axis={axis}"

    @staticmethod
    def constraint_axis_valid(op):
        "Axis attribute must be in the range [0, <ofm_dimensions>)"
        dims = len(op.ofm.shape)
        axis = op.attrs["axis"]
        axis += dims if axis < 0 else 0
        valid = 0 <= axis < dims
        return valid, f"Op has ofm_dimensions={dims} and axis attribute is: {axis}"

    @staticmethod
    def constraint_matching_dimensionality(op):
        "All Input dimensionalities must match OFM dimensionality"
        valid = True
        extra = []
        ofm_dim = len(op.ofm.shape)
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            dim = len(tens.shape)
            if dim != ofm_dim:
                valid = False
                extra.append(f"Tensor '{tens.name}' has dimension: {dim}")
        extra = ", ".join(extra)
        return valid, f"Op has ofm_dimension={ofm_dim} and the list of mismatching inputs are: {extra}"

    @staticmethod
    def constraint_valid_dimensions(op):
        "All Input dimensions must match OFM dimension in all axes except the one defined by the axis attribute"
        valid = True
        extra = []
        ofm_shape = op.ofm.shape
        ofm_dim = len(ofm_shape)
        axis = op.attrs["axis"]
        axis += ofm_dim if axis < 0 else 0
        tensors = [tens for tens in op.inputs if tens]
        for tens in tensors:
            if any(tens.shape[dim] != ofm_shape[dim] for dim in range(ofm_dim) if dim != axis):
                valid = False
                extra.append(f"Tensor '{tens.name}' has shape: {tens.shape}")
        extra = ", ".join(extra)
        return valid, f"Op has axis={axis}, ofm_shape={ofm_shape} and the list of mismatching inputs are: {extra}"

    @staticmethod
    def constraint_stridedslice_input_count(op):
        "Exactly 4 Input tensors are required"
        inputs = len(op.inputs)
        valid = inputs == 4
        return valid, f"Op has {inputs} inputs"

    @staticmethod
    def constraint_stridedslice_inputs_const(op):
        "Begin, End and Stride Input tensors must be constant"
        valid = True
        extra = []
        _, begin, end, strides = op.inputs
        if begin.values is None:
            valid = False
            extra.append(f"Begin tensor '{begin.name}'")
        if end.values is None:
            valid = False
            extra.append(f"End tensor '{end.name}'")
        if strides.values is None:
            valid = False
            extra.append(f"Stride tensor '{strides.name}'")
        extra = ", ".join(extra)
        return valid, f"Op has non-constant tensors: {extra}"

    @staticmethod
    def constraint_stridedslice_stride_values(op):
        "All Strides values must be 1"
        strides = op.inputs[3]
        valid = all(stride == 1 for stride in strides.values)
        return valid, f"Op has strides values {strides.values}"

    @staticmethod
    def constraint_ellipsis_mask(op):
        "ellipsis_mask must be 0"
        ellipsis = op.attrs["ellipsis_mask"]
        valid = ellipsis == 0
        return valid, f"Op has ellipsis mask as: {ellipsis}"

    @staticmethod
    def constraint_axis_masks(op):
        "new_axis_mask and shrink_axis_mask cannot both be set"
        new_axis = op.attrs["new_axis_mask"]
        shrink_axis = op.attrs["shrink_axis_mask"]
        valid = (new_axis == 0) or (shrink_axis == 0)
        return valid, f"Op has new_axis_mask={new_axis} and shrink_axis_mask={shrink_axis}"

    @staticmethod
    def constraint_slice_ranges(op):
        "Slice 'end' values must be greater than 'begin' values"
        ifm, begin, end, _ = op.inputs
        # Calculate offset begin/end
        offset_begin = get_slice_offsets(ifm.shape, begin, op.attrs["begin_mask"], is_begin=True)
        offset_end = get_slice_offsets(ifm.shape, end, op.attrs["end_mask"], is_begin=False)
        # Check "end - begin" doesn't result in any zero or negative elements
        valid = all((e - b) > 0 for b, e in zip(offset_begin, offset_end))
        return valid, f"Op has begin_values={begin.values} and end_values={end.values}"

    @staticmethod
    def constraint_matching_inputs_types(op):
        "Both Input data types must match"
        ifm_dtype = op.ifm.dtype
        ifm2_dtype = op.ifm2.dtype
        valid = ifm_dtype == ifm2_dtype
        return valid, f"Op has ifm_dtype={ifm_dtype} and ifm2_dtype={ifm2_dtype}"

    @staticmethod
    def constraint_matching_signed(op):
        "For IFM that are signed, OFM must also be signed"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        if ifm_dtype.type & BaseType.Signed:
            valid = bool(ofm_dtype.type & BaseType.Signed)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_unsigned_valid(op):
        "For IFM that are unsigned, OFM must either be the same type or int32"
        valid = True
        ifm_dtype = op.ifm.dtype
        ofm_dtype = op.ofm.dtype
        if ifm_dtype.type & BaseType.Unsigned:
            valid = (ifm_dtype == ofm_dtype) or (ofm_dtype == DataType.int32)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_inputs_int32(op):
        "Both Input data types must be int32"
        ifm_dtype = op.ifm.dtype
        ifm2_dtype = op.ifm2.dtype
        valid = (ifm_dtype == DataType.int32) and (ifm2_dtype == DataType.int32)
        return valid, f"Op has ifm_dtype={ifm_dtype} and ifm2_dtype={ifm2_dtype}"

    @staticmethod
    def constraint_output_int32(op):
        "OFM must be int32"
        ofm_dtype = op.ofm.dtype
        valid = ofm_dtype == DataType.int32
        return valid, f"Op has ofm_dtype={ofm_dtype}"

    @staticmethod
    def constraint_matching_quantization_parameters(op):
        "Both Input quantization parameters must match OFM quantization parameters"
        valid = True
        extra = []
        if not check_quantized_tens_scaling_equal(op.ofm, op.ifm):
            valid = False
            extra.append(op.ifm.name)
        if not check_quantized_tens_scaling_equal(op.ofm, op.ifm2):
            valid = False
            extra.append(op.ifm2.name)
        extra = ", ".join(extra)
        return valid, f"Op has tensors with different quantization parameters to the OFM '{op.ofm.name}': {extra}"

    @staticmethod
    def constraint_elemwise_batch_size(op):
        "Batch size must be 1 for Input tensors with more than 2 dimensions"
        valid = True
        extra = []
        for tens in (op.ifm, op.ifm2):
            # Unary ops have ifm2 as None
            if tens is not None:
                if (len(tens.shape) > 2) and (tens.shape[0] != 1):
                    valid = False
                    extra.append(tens.name)
        extra = ", ".join(extra)
        return valid, f"Op has invalid input tensors: {extra}"

    @staticmethod
    def constraint_matching_either_shapes(op):
        "At least one Input's shape must match the OFM's shape"
        ifm_shape = op.ifm.shape
        ifm2_shape = op.ifm2.shape if op.ifm2 else None
        ofm_shape = op.ofm.shape
        valid = (ifm_shape == ofm_shape) or (ifm2_shape == ofm_shape)
        return valid, f"Op has ifm_shape={ifm_shape}, ifm2_shape={ifm2_shape} and ofm_shape={ofm_shape}"

    @staticmethod
    def constraint_broadcast_shapes(op):
        "Broadcasting is only allowed for rank indices with dimension 1, from either IFM1 or IFM2"
        ifm_shape = op.ifm.shape
        ifm2_shape = op.ifm2.shape if op.ifm2 else None
        ofm_shape = op.ofm.shape
        valid = True
        if ifm_shape is not None and ifm2_shape is not None:
            # align trailing dimensions
            size = min(len(ifm_shape), len(ifm2_shape))
            for i, i2, o in zip(ifm_shape[-size:], ifm2_shape[-size:], ofm_shape[-size:]):
                mi = max(i, i2)
                # Input dimensions should match or one should be of dimension 1
                # Output dimension should match the largest input dimension, together
                # with constraint_match_either_shapes ensures broadcast from only one input
                if not (i == i2 or i == 1 or i2 == 1) or o != mi:
                    valid = False
                    break

        return valid, f"Op has ifm_shape={ifm_shape} and ifm2_shape={ifm2_shape}"

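    # Example of the broadcast rule above (hypothetical shapes): with trailing
    # dimensions aligned, ifm_shape=[1, 4, 8] and ifm2_shape=[1, 1, 8] are valid
    # against ofm_shape=[1, 4, 8] (the dimension-1 axis broadcasts); shapes
    # [1, 4, 8] and [1, 3, 8] fail because 4 and 3 neither match nor include a 1.
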
    @staticmethod
    def constraint_alpha_valid(op):
        "Alpha must not be negative"
        alpha = op.attrs["alpha"]
        valid = alpha >= 0
        return valid, f"Op has alpha={alpha}"