# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The SupportedOperators class which is a collection of all supported operators and parameter checks.
import numpy as np

from .data_type import BaseType
from .data_type import DataType
from .operation import get_slice_offsets
from .operation import Op


# Custom decorator function to allow formatting docstrings containing "{}"
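# (used below as, e.g., "@docstring_format_args([supported_dtypes, supported_int32_tensor_ops])" so that
# the constraint reason printed by is_operator_supported() shows the actual supported values)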
def docstring_format_args(args):
    def docstring(func):
        func.__doc__ = func.__doc__.format(*args)
        return func

    return docstring


def warn_cpu(op, msg):
    print("Warning: {} {}, placing on CPU".format(op.type, msg))


class SupportedOperators:
    # Categorised lists of supported operators
    npu_pre_ops = set((Op.SplitSliceRead,))
    convolution_ops = set((Op.Conv2DBias, Op.Conv2D, Op.QuantizedConv2D,))
    depthwise_convolution_ops = set((Op.DepthwiseConv2DBias,))
    transpose_convolution_ops = set((Op.Conv2DBackpropInput,))
    max_pooling_ops = Op.op_set(Op.is_maxpool_op)
    avg_pooling_ops = Op.op_set(Op.is_avgpool_op)
    pooling_ops = set((Op.ReduceSum,)) | max_pooling_ops | avg_pooling_ops
    resizing_ops = set((Op.ResizeBilinear,))
    fc_vector_products = set((Op.QuantizedMatMul, Op.MatMul, Op.FullyConnected,))
    mac_main_ops = (
        # RNN/LSTM/GRU
        set((Op.BlockLSTM,))
        # convolutions
        | convolution_ops
        # depth-wise convolutions
        | depthwise_convolution_ops
        # transpose convolutions
        | transpose_convolution_ops
        # pooling
        | pooling_ops
        # resizing/upscaling
        | resizing_ops
        # FC layers
        | fc_vector_products
    )
    unary_elem_wise_main_ops = Op.op_set(Op.is_unary_elementwise_op)
    binary_elem_wise_min_max_ops = set((Op.Minimum, Op.Maximum,))
    binary_elem_wise_shift_ops = set((Op.SHL, Op.SHR,))
    binary_elem_wise_add_mul_sub = set((Op.Add, Op.Mul, Op.Sub,))
    binary_elem_wise_main_ops = binary_elem_wise_min_max_ops | binary_elem_wise_add_mul_sub | binary_elem_wise_shift_ops
    elem_wise_main_ops = binary_elem_wise_main_ops | unary_elem_wise_main_ops
    supported_int32_tensor_ops = (
        set((Op.ReduceSum, Op.CLZ,)) | binary_elem_wise_add_mul_sub | binary_elem_wise_shift_ops
    )
    activation_ops = set((Op.Relu, Op.Relu6, Op.ReluN1To1, Op.Sigmoid, Op.Tanh, Op.Softmax,))
    npu_post_ops = (
        # activation functions
        activation_ops
        # concatenation write direction
        | set((Op.ConcatSliceWrite,))
        # Quantization
        | set((Op.Quantize,))
    )
    split_ops = set((Op.Split, Op.SplitV, Op.StridedSlice, Op.Slice, Op.UnpackReshaped, Op.Unpack,))
    concat_ops = set((Op.Concat, Op.ConcatTFLite, Op.PackReshaped, Op.Pack,))
    memory_only_ops = set((Op.Squeeze, Op.Reshape, Op.QuantizedReshape, Op.ExpandDims,)) | concat_ops | split_ops
    shapeless_input_ops = binary_elem_wise_main_ops | set((Op.Split, Op.SplitV,))
    supported_fused_activations = set((Op.Relu, Op.Relu6, Op.ReluN1To1, Op.Tanh, Op.Sigmoid, Op.LUT,))
    supported_operators = npu_pre_ops | mac_main_ops | elem_wise_main_ops | npu_post_ops | memory_only_ops
    supported_dtypes = set((DataType.uint8, DataType.int8, DataType.int16, DataType.int32))
    # Defined ranges for allowed values:
    tens_dim_range = (1, 65535)

    def __init__(self):
        # Setup supported operator restriction checkers
        self.supported_operator_restrictions = {}
        self.supported_operator_restrictions.update(
            {op: self.check_convolution_restrictions for op in SupportedOperators.convolution_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_depthwise_convolution_restrictions for op in SupportedOperators.depthwise_convolution_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_transpose_convolution_restrictions for op in SupportedOperators.transpose_convolution_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_pooling_restrictions for op in SupportedOperators.pooling_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_resize_restrictions for op in SupportedOperators.resizing_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_vector_product_restrictions for op in SupportedOperators.fc_vector_products}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_element_wise_restrictions for op in SupportedOperators.elem_wise_main_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_memory_only_restrictions for op in SupportedOperators.memory_only_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_activation_ops for op in SupportedOperators.activation_ops}
        )
        # Setup the generic constraints
        self.generic_constraints = []
        self.generic_constraints.append(SupportedOperators.constraint_tens_defined_shape)
        self.generic_constraints.append(SupportedOperators.constraint_tens_shapeless)
        self.generic_constraints.append(SupportedOperators.constraint_tens_shape_size)
        self.generic_constraints.append(SupportedOperators.constraint_tens_dtype)
        self.generic_constraints.append(SupportedOperators.constraint_tens_dimension)
        self.generic_constraints.append(SupportedOperators.constraint_faf)
        self.generic_constraints.append(SupportedOperators.constraint_tens_quant_scale)

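    # Main entry point: an op can run on the NPU only if its type is in supported_operators, it passes
    # every generic constraint, and it passes the op-type-specific restriction checker (if one is registered).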
    def is_operator_supported(self, op):
        if op.type not in SupportedOperators.supported_operators:
            return False
        for constraint in self.generic_constraints:
            valid, extra = constraint(op)
            if not valid:
                print('Warning: "{}" is not supported on the NPU. Placing on CPU instead'.format(op.type))
                print(" - {}".format(constraint.__doc__))
                if extra:
                    print(" {}".format(extra))
                return False
        if op.type in self.supported_operator_restrictions:
            return self.supported_operator_restrictions[op.type](op)
        return True

    @staticmethod
    def constraint_tens_defined_shape(op):
        "Input(s) and Output Tensors must have a defined shape"
        valid = True
        extra = []
        for tens in op.inputs + op.outputs:
            if tens:
                valid &= tens.has_fully_defined_shape()
                extra.append("shape={}".format(tens.shape))
        return valid, " ".join(extra)

    @classmethod
    @docstring_format_args([shapeless_input_ops])
    def constraint_tens_shapeless(cls, op):
        "Scalar or Broadcasting Tensors are only valid for Input Tensors, and when op type is: {}"
        valid = True
        extra = []
        for tens in op.inputs:
            if tens and tens.shape == []:
                valid &= op.type in cls.shapeless_input_ops
                extra.append("shape={}".format(tens.shape))
        for tens in op.outputs:
            if tens.shape == []:
                valid = False
                extra.append("shape={}".format(tens.shape))
        return valid, " ".join(extra)

    @staticmethod
    def constraint_tens_shape_size(op):
        "Input(s) and Output Tensors must not be greater than 4D"
        valid = True
        extra = []
        for tens in op.inputs + op.outputs:
            if tens:
                valid &= len(tens.shape) <= 4
                extra.append("shape={}".format(tens.shape))
        return valid, " ".join(extra)

    @classmethod
    @docstring_format_args([supported_dtypes, supported_int32_tensor_ops])
    def constraint_tens_dtype(cls, op):
        "Tensors must be of type: {}. Tensors which are int32 are only valid when op type is: {}"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        tensors = tensors if tensors else op.inputs
        for tens in tensors:
            if tens.dtype == DataType.int32:
                valid &= op.type in cls.supported_int32_tensor_ops
            else:
                valid &= tens.dtype in cls.supported_dtypes
            extra.append("dtype={}".format(tens.dtype))
        return valid, " ".join(extra)

    @classmethod
    @docstring_format_args(tens_dim_range)
    def constraint_tens_dimension(cls, op):
        "Tensor dimensions must be in the range {}-{} (inclusive)"
        tens_min, tens_max = cls.tens_dim_range
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        tensors = tensors if tensors else op.inputs
        for tens in tensors:
            valid &= all(tens_min <= dim <= tens_max for dim in tens.shape)
            extra.append("shape={}".format(tens.shape))
        return valid, " ".join(extra)

    @classmethod
    @docstring_format_args([supported_fused_activations])
    def constraint_faf(cls, op):
        "The fused activation function (if present) must be one of type: {}"
        faf = op.activation
        valid = (faf is None) or (faf in cls.supported_fused_activations)
        extra = "fused_activation_function={}".format(faf)
        return valid, extra

    @staticmethod
    def constraint_tens_quant_scale(op):
        "Tensors with quantization scales must be finite"
        valid = True
        extra = []
        tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
        for tens in tensors:
            if tens.quantization is not None and tens.quantization.scale_f32 is not None:
                valid &= not np.isinf(tens.quantization.scale_f32).any()
                extra.append("quantization.scale_f32={}".format(tens.quantization.scale_f32))
        return valid, " ".join(extra)

    @classmethod
    def check_convolution_restrictions(cls, op):
        # check stride
        if op.attrs["stride_w"] > 3 or op.attrs["stride_h"] > 3:
            return False

        # check dilation
        dilation_w_factor = op.attrs.get("dilation_w_factor", 1)
        dilation_h_factor = op.attrs.get("dilation_h_factor", 1)
        if dilation_w_factor > 2 or dilation_h_factor > 2:
            return False

        # check data type
        ifm_tensor, _, weight_tensor, bias_tensor, _ = op.get_ifm_ifm2_weights_biases_ofm()
        if weight_tensor.element_size() > 1:
            return False

        if not cls.check_bias_restrictions(bias_tensor):
            return False

        # check kernel size [HWIO]
        dilated_weight_w = weight_tensor.shape[1] + (weight_tensor.shape[1] - 1) * (dilation_w_factor - 1)
        dilated_weight_h = weight_tensor.shape[0] + (weight_tensor.shape[0] - 1) * (dilation_h_factor - 1)

        if dilated_weight_w > 64 or dilated_weight_h > 64:
            return False

        # check non const weights
        if weight_tensor.values is None:
            print("Warning:", op.type, "has non-const weights, placing on CPU")
            return False

        # check weight sums over [HWI]
        zero_point = weight_tensor.quantization.zero_point
        quant_weights = weight_tensor.quant_values.astype(np.int64)
        weights = quant_weights - zero_point
        totals = np.sum(np.absolute(weights), axis=(0, 1, 2))

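        # (the 127 * 65536 bound below is assumed to correspond to the NPU's limit on the per-channel
        # sum of absolute weights; it is not documented in this file)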
        if np.amax(totals) > 127 * 65536:
            return False

        # check batch size
        if ifm_tensor.shape[0] != 1:
            return False

        return True

    @classmethod
    def check_depthwise_convolution_restrictions(cls, op):
        # check depth
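        # depth_multiplier > 1 is only supported when the IFM depth is 1, i.e. when the OFM depth
        # equals the depth multiplier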
        ifm_tensor, ofm_tensor = op.get_ifm_ofm()
        if op.attrs["depth_multiplier"] > 1 and not (
            (ifm_tensor.shape[3] == 1) and (ofm_tensor.shape[3] == op.attrs["depth_multiplier"])
        ):
            return False
        return cls.check_convolution_restrictions(op)

    @classmethod
    def check_transpose_convolution_restrictions(cls, op):
        # check stride
        stride_h, stride_w = op.attrs["stride_h"], op.attrs["stride_w"]
        if stride_h != 2 or stride_w != 2:
            return False

        # check output dimensions
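        # with stride 2: SAME padding requires OFM = IFM * stride exactly; VALID padding requires
        # OFM = IFM * stride + max(kernel - stride, 0), per spatial dimension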
        ifm_tensor, weight_tensor, _, ofm_tensor = op.get_ifm_weights_biases_ofm()
        ifm_h, ifm_w = ifm_tensor.shape[1], ifm_tensor.shape[2]
        ofm_h, ofm_w = ofm_tensor.shape[1], ofm_tensor.shape[2]
        if op.attrs["padding"] == b"SAME":
            if (ofm_h != ifm_h * stride_h) or (ofm_w != ifm_w * stride_w):
                return False
        elif op.attrs["padding"] == b"VALID":
            kernel_h, kernel_w = weight_tensor.shape[0], weight_tensor.shape[1]
            if (ofm_h != (ifm_h) * stride_h + max(kernel_h - stride_h, 0)) or (
                ofm_w != (ifm_w) * stride_w + max(kernel_w - stride_w, 0)
            ):
                return False

        return cls.check_convolution_restrictions(op)

    @classmethod
    def check_pooling_restrictions(cls, op):
        # check stride
        if op.attrs["stride_w"] > 3 or op.attrs["stride_h"] > 3:
            return False

        # check data type
        ifm_tensor, ofm_tensor = op.get_ifm_ofm()
        if ifm_tensor.dtype != ofm_tensor.dtype:
            if op.type != Op.ReduceSum:
                return False
            # TODO: else check ReduceSum restrictions.

        # check batch size
        if ifm_tensor.shape[0] != 1:
            return False

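        # the kernel size limits below (8x8 for SAME-padded average pooling, otherwise a 256x256 area
        # with height <= 256) are assumed to reflect the NPU pooling engine's restrictions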
        if op.type in cls.avg_pooling_ops:
            # check kernel size
            if op.attrs["padding"] == b"SAME" and (op.attrs["filter_width"] > 8 or op.attrs["filter_height"] > 8):
                return False
            if op.attrs["padding"] == b"VALID" and (
                op.attrs["filter_width"] * op.attrs["filter_height"] > 256 * 256 or op.attrs["filter_height"] > 256
            ):
                return False

        if op.type in cls.max_pooling_ops:
            # check kernel size (any padding)
            if op.attrs["filter_width"] * op.attrs["filter_height"] > 256 * 256 or op.attrs["filter_height"] > 256:
                return False
        return True

    @classmethod
    def check_resize_restrictions(cls, op):
        # check unsupported upscaling factor
        if op.type == Op.ResizeBilinear:
            if op.inputs[0].shape[1] == 1 and op.inputs[0].shape[2] == 1:
                return True
            if op.inputs[0].shape == op.outputs[0].shape:
                return True
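            # otherwise the output H/W must be reachable by repeatedly doubling the input H/W
            # (with a -1 adjustment after each doubling when align_corners is set)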
            upscaled_shape = np.array(op.inputs[0].shape[1:3])
            out_shape = np.array(op.outputs[0].shape[1:3])
            while (upscaled_shape < out_shape).all():
                upscaled_shape *= 2
                if op.attrs["align_corners"]:
                    upscaled_shape -= 1
                if np.array_equal(out_shape, upscaled_shape):
                    return True
        return False

    @classmethod
    def check_vector_product_restrictions(cls, op):
        # check data type
        _, _, weight_tensor, bias_tensor, _ = op.get_ifm_ifm2_weights_biases_ofm()
        if weight_tensor.element_size() > 1:
            return False

        if not cls.check_bias_restrictions(bias_tensor):
            return False

        # check non const weights
        if weight_tensor.values is None:
            print("Warning:", op.type, "has non-const weights, placing on CPU")
            return False

        return True

    @classmethod
    def check_element_wise_restrictions(cls, op):
        # check data type
        ifm_tensor, ifm2_tensor, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        # input and output datatype must match for these operators
        if (
            op.type in cls.binary_elem_wise_min_max_ops | cls.unary_elem_wise_main_ops
            and ifm_tensor.dtype != ofm_tensor.dtype
        ):
            return False
        if op.type in cls.binary_elem_wise_add_mul_sub:
            # both inputs must have same type
            if ifm_tensor.dtype != ifm2_tensor.dtype:
                return False
            # signed input check
            if ifm_tensor.dtype.type & BaseType.Signed:
                # output must be signed
                if ofm_tensor.dtype.type & BaseType.Unsigned:
                    return False
                # and 8, 16 or 32-bit
                if ofm_tensor.element_size() not in (1, 2, 4):
                    return False
            # unsigned input check, output must be same type or int32
            if ifm_tensor.dtype.type & BaseType.Unsigned and not (
                ifm_tensor.dtype == ofm_tensor.dtype or ofm_tensor.dtype == DataType.int32
            ):
                return False
        elif op.type in cls.binary_elem_wise_shift_ops:
            if ifm_tensor.dtype != DataType.int32 or ifm2_tensor.dtype != DataType.int32:
                return False
            if op.type in (Op.CLZ, Op.SHL) and ofm_tensor.dtype != DataType.int32:
                return False

        # check batch size
        if len(ifm_tensor.shape) > 2 and ifm_tensor.shape[0] != 1:
            return False
        if op.type in cls.binary_elem_wise_main_ops:  # if op type is unary, ifm2_tensor is None
            if len(ifm2_tensor.shape) > 2 and ifm2_tensor.shape[0] != 1:
                return False

        # negative alpha values are not supported
        if op.type == Op.LeakyRelu and op.attrs["alpha"] < 0:
            return False

        # check if ifm or ifm2 has ofm shape
        if ifm_tensor.shape != ofm_tensor.shape and (ifm2_tensor is None or ifm2_tensor.shape != ofm_tensor.shape):
            return False

        if op.type in cls.binary_elem_wise_min_max_ops and not cls.check_quantization_restrictions_binary_elem_wise(op):
            return False

        return True

    @classmethod
    def check_memory_only_restrictions(cls, op):
        if op.type == Op.StridedSlice:
            if len(op.inputs) != 4:
                warn_cpu(op, "has {} input tensors, only 4 inputs are supported".format(len(op.inputs)))
                return False
            input_tens, begin_tens, end_tens, strides_tens = op.inputs
            if begin_tens.values is None or end_tens.values is None or strides_tens.values is None:
                warn_cpu(op, "has a non-constant begin, end, or stride input tensor, which is not supported")
                return False
            if not (
                len(input_tens.shape)
                == len(op.outputs[0].shape)
                == len(begin_tens.values)
                == len(end_tens.values)
                == len(strides_tens.values)
            ):
                warn_cpu(op, "has input tensors with shapes that are not supported")
                return False
            # check stride size
            if any(stride != 1 for stride in strides_tens.values):
                warn_cpu(op, "has stride values {}, only stride 1 values are supported".format(strides_tens.values))
                return False
            # check ellipsis_mask
            if op.attrs["ellipsis_mask"] != 0:
                warn_cpu(op, "ellipsis_mask is {}, only 0 is supported".format(op.attrs["ellipsis_mask"]))
                return False
            # check if both new_axis_mask and shrink_axis_mask have bit set
            if op.attrs["new_axis_mask"] != 0 and op.attrs["shrink_axis_mask"] != 0:
                warn_cpu(op, "new_axis_mask and shrink_axis_mask are both non-zero, which is not supported")
                return False
            # Calculate offset start/end
            offset_start = get_slice_offsets(input_tens.shape, begin_tens, op.attrs["begin_mask"], is_begin=True)
            offset_end = get_slice_offsets(input_tens.shape, end_tens, op.attrs["end_mask"], is_begin=False)
            # check "end - begin" doesn't result in any zero or negative elements
            if any((end - begin) <= 0 for begin, end in zip(offset_start, offset_end)):
                warn_cpu(
                    op,
                    "has slice begin values {}, some of which are >= end values {}, which is illegal".format(
                        begin_tens.values, end_tens.values
                    ),
                )
                return False
        if op.type == Op.SplitV:
            # check that maximum one size is set to -1, indicating that size should be inferred
            sizes = op.inputs[1].values
            num_to_be_inferred = 0
            for size in sizes:
                if size == -1:
                    num_to_be_inferred += 1

            if num_to_be_inferred > 1:
                print("Warning:", op.type, "has more than one size to be inferred, which is illegal, placing on CPU")
                return False
        if op.type in set((Op.Concat, Op.ConcatTFLite,)):
            axis = op.attrs.get("axis", None)
            if axis is None:
                print("Warning:", op.type, "invalid or missing axis, placing on CPU")
                return False
            if axis < 0:
                axis += len(op.inputs[0].shape)
            if not 0 <= axis < len(op.inputs[0].shape):
                print("Warning:", op.type, "invalid axis", axis, ", placing on CPU")
                return False
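            # every input must have the same rank as the output and match it in all dimensions
            # except the concatenation axis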
            ofm = op.outputs[0]
            ofm_dims = len(ofm.shape)
            for ifm in op.inputs:
                if len(ifm.shape) != ofm_dims:
                    return False
                for i in range(ofm_dims):
                    if i != axis and ifm.shape[i] != ofm.shape[i]:
                        print(
                            "Warning:",
                            op.type,
                            "invalid ifm:",
                            ifm.name,
                            ifm.shape,
                            "mismatch in dimension",
                            i,
                            ", placing on CPU",
                        )
                        return False

        return True

    @classmethod
    def check_quantization_restrictions_binary_elem_wise(cls, op):
        # makes sure IFM1, IFM2 and OFM quantization are equal for binary ops
        assert len(op.inputs) >= 2 and len(op.outputs) == 1

        if (
            op.inputs[0].quantization is None
            or not op.inputs[0].is_scaling_equal(op.inputs[1])
            or not op.inputs[0].is_scaling_equal(op.outputs[0])
        ):
            print(
                "Warning: Input/output tensors with different quantization is unsupported for the", op.type, "operator"
            )
            return False

        return True

    @classmethod
    def check_activation_ops(cls, op):
        if op.type == Op.Softmax:
            ifm_tensor = op.inputs[0]
            ofm_tensor = op.outputs[0]

            # check data type
            if ifm_tensor.dtype != ofm_tensor.dtype:
                return False

            if ifm_tensor.dtype not in (DataType.uint8, DataType.int8, DataType.int16):
                return False

            # check shape
            if ifm_tensor.shape != ofm_tensor.shape:
                return False

        return True

    @classmethod
    def check_bias_restrictions(cls, bias_tensor):
        # check data type
        if bias_tensor is not None and bias_tensor.dtype not in (DataType.int32, DataType.int64):
            return False

        # check that the values fit in 40 bits
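        # (i.e. each value must lie in the signed 40-bit range -(1 << 39) .. (1 << 39) - 1)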
        if bias_tensor is not None and bias_tensor.dtype == DataType.int64:
            for quant_value in bias_tensor.quant_values:
                if not (-(1 << 39) <= quant_value < (1 << 39)):
                    return False

        return True