# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The SupportedOperators class which is a collection of all supported operators and parameter checks.
import numpy as np

from .data_type import BaseType
from .data_type import DataType


class SupportedOperators:
    def __init__(self):
        # Categorised lists of supported operators
        self.npu_pre_ops = set(("QuantizedResizeBilinear", "SplitSliceRead",))
        self.convolution_ops = set(("Conv2DBiasAct", "Conv2D", "QuantizedConv2D",))
        self.depthwise_convolution_ops = set(
            ("DepthwiseConv2dBiasAct", "DepthwiseConv2dNative", "QuantizedDepthwiseConv2D",)
        )
        self.transpose_convolution_ops = set(("Conv2DBackpropInput",))
        self.max_pooling_ops = set(("QuantizedMaxPool", "MaxPool", "MaxPoolAct",))
        self.avg_pooling_ops = set(("QuantizedAvgPool", "AvgPool", "AvgPoolAct",))
        self.pooling_ops = set(("ReduceSum",)) | self.max_pooling_ops | self.avg_pooling_ops
        self.resizing_ops = set(("ResizeBilinear",))
        self.fc_vector_products = set(("QuantizedMatMul", "MatMul", "FullyConnectedAct",))
        self.mac_main_ops = (
            # convolutions
            self.convolution_ops
            # depth-wise convolutions
            | self.depthwise_convolution_ops
            # transpose convolutions
            | self.transpose_convolution_ops
            # pooling
            | self.pooling_ops
            # resizing/upscaling
            | self.resizing_ops
            # FC layers
            | self.fc_vector_products
            # RNN/LSTM/GRU
            | set(("BlockLSTM",))
        )
        self.unary_elem_wise_main_ops = set(("LeakyRelu", "Abs", "CLZ",))
        self.binary_elem_wise_min_max_ops = set(("Minimum", "Maximum",))
        self.binary_elem_wise_shift_ops = set(("SHL", "SHR",))
        self.binary_elem_wise_add_mul_sub = set(
            ("AddAct", "MulAct", "SubAct", "QuantizedAdd", "QuantizedSub", "QuantizedMul", "Mul", "Add", "Sub",)
        )
        self.binary_elem_wise_main_ops = (
            self.binary_elem_wise_min_max_ops | self.binary_elem_wise_add_mul_sub | self.binary_elem_wise_shift_ops
        )
        self.elem_wise_main_ops = self.binary_elem_wise_main_ops | self.unary_elem_wise_main_ops
        self.activation_ops = set(
            (
                "QuantizedRelu",
                "QuantizedRelu1",
                "QuantizedRelu6",
                "Relu",
                "Relu6",
                "ReluN1To1",
                "Sigmoid",
                "Tanh",
                "Softmax",
            )
        )
        self.npu_post_ops = (
            # activation functions
            self.activation_ops
            # concatenation write direction
            | set(("ConcatSliceWrite",))
            # bias add and batch norm
            | set(("QuantizedBiasAdd", "Requantize", "QuantizedBatchNorm", "BiasAdd", "FusedBatchNorm",))
            # Quantization
            | set(("Quantize",))
        )
        self.split_ops = set(("Split", "SplitV", "StridedSlice", "Slice", "UnpackReshaped", "Unpack",))
        self.concat_ops = set(("Concat", "ConcatV2", "QuantizedConcat", "ConcatTFLite", "PackReshaped", "Pack",))
        self.memory_only_ops = (
            set(("Squeeze", "Reshape", "QuantizedReshape", "ExpandDims",)) | self.concat_ops | self.split_ops
        )
        self.shapeless_input_ops = self.binary_elem_wise_main_ops | set(("Split", "SplitV",))
        self.supported_fused_activations = set(("Relu", "Relu6", "ReluN1To1", "Tanh", "Sigmoid", "LUT",))
        self.supported_operators = (
            self.npu_pre_ops | self.mac_main_ops | self.elem_wise_main_ops | self.npu_post_ops | self.memory_only_ops
        )
        # Setup supported operator restriction checkers
        self.supported_operator_restrictions = {}
        self.supported_operator_restrictions.update(
            {op: self.check_convolution_restrictions for op in self.convolution_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_depthwise_convolution_restrictions for op in self.depthwise_convolution_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_transpose_convolution_restrictions for op in self.transpose_convolution_ops}
        )
        self.supported_operator_restrictions.update({op: self.check_pooling_restrictions for op in self.pooling_ops})
        self.supported_operator_restrictions.update({op: self.check_resize_restrictions for op in self.resizing_ops})
        self.supported_operator_restrictions.update(
            {op: self.check_vector_product_restrictions for op in self.fc_vector_products}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_element_wise_restrictions for op in self.elem_wise_main_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_memory_only_restrictions for op in self.memory_only_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_quantization_restrictions_binary_elem_wise for op in self.binary_elem_wise_min_max_ops}
        )
        self.supported_operator_restrictions.update({op: self.check_activation_ops for op in self.activation_ops})
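        # Operators listed in supported_operators but with no checker registered above (for example
        # "ConcatSliceWrite" or "BlockLSTM") are accepted after the generic checks alone; see
        # is_operator_supported below.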

    def is_operator_supported(self, op):
        if op.type not in self.supported_operators:
            return False
        if not self.check_generic_restrictions(op):
            return False
        if op.type in self.supported_operator_restrictions:
            return self.supported_operator_restrictions[op.type](op)
        return True

    def check_generic_restrictions(self, op):
        # check fully defined shapes
        for t in op.inputs:
            if not t:
                continue
            if not t.has_fully_defined_shape():
                print("Warning:", op.type, "has input(s) of undefined shape, placing on CPU")
                return False
            if t.shape == [] and op.type not in self.shapeless_input_ops:
                print(
                    "Warning:",
                    op.type,
                    "has input(s) of shape [].",
                    "Scalar input or broadcasting is not supported for this operator,",
                    "placing on CPU",
                )
                return False
        for t in op.outputs:
            if not t.has_fully_defined_shape():
                print("Warning:", op.type, "has output(s) of undefined shape, placing on CPU")
                return False
            if t.shape == []:
                print(
                    "Warning:",
                    op.type,
                    "has output(s) of shape [].",
                    "Scalar input or broadcasting is not supported for this operator,",
                    "placing on CPU",
                )
                return False

        # check data type
        tensors = [t for t in op.get_ifm_ifm2_weights_ofm() if t is not None]
        if not tensors:
            tensors = op.inputs
        for t in tensors:
            if not (t.dtype.type & BaseType.Int):
                return False
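            # int32 and wider tensors are only accepted for the op types listed in the check below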
            if (
                t.element_size() > 2
                and op.type
                not in set(("Requantize", "ReduceSum", "CLZ",))
                | self.binary_elem_wise_add_mul_sub
                | self.binary_elem_wise_shift_ops
            ):
                return False
            # check size
            if any(dim > 65536 for dim in t.shape):
                return False

        # check fused activations
        if (
            "fused_activation_function" in op.attrs
            and op.attrs["fused_activation_function"] is not None
            and op.attrs["fused_activation_function"] not in self.supported_fused_activations
        ):
            return False
        return True

    def check_convolution_restrictions(self, op):
        # check stride
        if op.attrs["stride_w"] > 3 or op.attrs["stride_h"] > 3:
            return False

        # check dilation
        dilation_w_factor = op.attrs.get("dilation_w_factor", 1)
        dilation_h_factor = op.attrs.get("dilation_h_factor", 1)
        if dilation_w_factor > 2 or dilation_h_factor > 2:
            return False

        # check data type
        ifm_tensor, _, weight_tensor, bias_tensor, _ = op.get_ifm_ifm2_weights_biases_ofm()
        if weight_tensor.element_size() > 1:
            return False

        if not self.check_bias_restrictions(bias_tensor):
            return False

        # check kernel size [HWIO]
        dilated_weight_w = weight_tensor.shape[1] + (weight_tensor.shape[1] - 1) * (dilation_w_factor - 1)
        dilated_weight_h = weight_tensor.shape[0] + (weight_tensor.shape[0] - 1) * (dilation_h_factor - 1)
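        # e.g. a 3x3 kernel with a dilation factor of 2 covers a 5x5 window: 3 + (3 - 1) * (2 - 1) = 5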

        if dilated_weight_w > 64 or dilated_weight_h > 64:
            return False

        # check weight sums over [HWI]
        zero_point = weight_tensor.quantization.zero_point
        quant_weights = weight_tensor.quant_values.astype(np.int64)
        weights = quant_weights - zero_point
        totals = np.sum(np.absolute(weights), axis=(0, 1, 2))
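        # totals holds one sum of absolute, zero-point corrected weights per output channel (axis 3 of HWIO)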

        if np.amax(totals) > 127 * 65536:
            return False

        # check batch size
        if ifm_tensor.shape[0] != 1:
            return False
        return True

    def check_depthwise_convolution_restrictions(self, op):
        # check depth
        ifm_tensor, _, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
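        # a depth_multiplier > 1 is only supported when the IFM has a single channel and the OFM depth
        # equals the multiplier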
        if op.attrs["depth_multiplier"] > 1 and not (
            (ifm_tensor.shape[3] == 1) and (ofm_tensor.shape[3] == op.attrs["depth_multiplier"])
        ):
            return False
        return self.check_convolution_restrictions(op)

    def check_transpose_convolution_restrictions(self, op):
        # check stride
        stride_h, stride_w = op.attrs["stride_h"], op.attrs["stride_w"]
        if stride_h != 2 or stride_w != 2:
            return False

        # check output dimensions
        ifm_tensor, weight_tensor, _, ofm_tensor = op.get_ifm_weights_biases_ofm()
        ifm_h, ifm_w = ifm_tensor.shape[1], ifm_tensor.shape[2]
        ofm_h, ofm_w = ofm_tensor.shape[1], ofm_tensor.shape[2]
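        # SAME padding must upscale exactly by the stride (e.g. a 16x16 IFM gives a 32x32 OFM with stride 2);
        # VALID padding also allows for the kernel overhang: ofm = ifm * stride + max(kernel - stride, 0)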
        if op.attrs["padding"] == b"SAME":
            if (ofm_h != ifm_h * stride_h) or (ofm_w != ifm_w * stride_w):
                return False
        elif op.attrs["padding"] == b"VALID":
            kernel_h, kernel_w = weight_tensor.shape[0], weight_tensor.shape[1]
            if (ofm_h != (ifm_h) * stride_h + max(kernel_h - stride_h, 0)) or (
                ofm_w != (ifm_w) * stride_w + max(kernel_w - stride_w, 0)
            ):
                return False

        return self.check_convolution_restrictions(op)

    def check_pooling_restrictions(self, op):
        # check stride
        if op.attrs["stride_w"] > 3 or op.attrs["stride_h"] > 3:
            return False

        # check data type
        ifm_tensor, _, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        if ifm_tensor.dtype != ofm_tensor.dtype:
            if op.type != "ReduceSum":
                return False
            # TODO: else check ReduceSum restrictions.

        # check batch size
        if ifm_tensor.shape[0] != 1:
            return False

        if op.type in self.avg_pooling_ops:
            # check kernel size
            if op.attrs["padding"] == b"SAME" and (op.attrs["filter_width"] > 8 or op.attrs["filter_height"] > 8):
                return False
            if op.attrs["padding"] == b"VALID" and (
                op.attrs["filter_width"] * op.attrs["filter_height"] > 256 * 256 or op.attrs["filter_height"] > 256
            ):
                return False

        if op.type in self.max_pooling_ops:
            # check kernel size (any padding)
            if op.attrs["filter_width"] * op.attrs["filter_height"] > 256 * 256 or op.attrs["filter_height"] > 256:
                return False
        return True

    def check_resize_restrictions(self, op):
        # check unsupported upscaling factor
        if op.type == "ResizeBilinear":
            if op.inputs[0].shape[1] == 1 and op.inputs[0].shape[2] == 1:
                return True
            if op.inputs[0].shape == op.outputs[0].shape:
                return True
            upscaled_shape = np.array(op.inputs[0].shape[1:3])
            out_shape = np.array(op.outputs[0].shape[1:3])
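            # only repeated 2x upscaling is accepted, e.g. a 4x4 IFM may produce an 8x8, 16x16, ... OFM,
            # or a 7x7, 13x13, ... OFM when align_corners is set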
            while (upscaled_shape < out_shape).all():
                upscaled_shape *= 2
                if op.attrs["align_corners"]:
                    upscaled_shape -= 1
                if np.array_equal(out_shape, upscaled_shape):
                    return True
        return False

    def check_vector_product_restrictions(self, op):
        # check data type
        _, _, weight_tensor, bias_tensor, _ = op.get_ifm_ifm2_weights_biases_ofm()
        if weight_tensor.element_size() > 1:
            return False

        if not self.check_bias_restrictions(bias_tensor):
            return False

        return True

    def check_element_wise_restrictions(self, op):
        # check data type
        ifm_tensor, ifm2_tensor, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        # input and output datatype must match for these operators
        if (
            op.type in self.binary_elem_wise_min_max_ops | self.unary_elem_wise_main_ops
            and ifm_tensor.dtype != ofm_tensor.dtype
        ):
            return False
        if op.type in self.binary_elem_wise_add_mul_sub:
            # both inputs must have same type
            if ifm_tensor.dtype != ifm2_tensor.dtype:
                return False
            # signed input check
            if ifm_tensor.dtype.type & BaseType.Signed:
                # output must be signed
                if ofm_tensor.dtype.type & BaseType.Unsigned:
                    return False
                # and 8, 16 or 32-bit
                if ofm_tensor.element_size() not in (1, 2, 4):
                    return False
            # unsigned input check, output must be same type or int32
            if ifm_tensor.dtype.type & BaseType.Unsigned and not (
                ifm_tensor.dtype == ofm_tensor.dtype or ofm_tensor.dtype == DataType.int32
            ):
                return False
        elif op.type in self.binary_elem_wise_shift_ops | set(("CLZ",)):
            if ifm_tensor.dtype != DataType.int32 or ifm2_tensor.dtype != DataType.int32:
                return False
            if op.type in ("CLZ", "SHL") and ofm_tensor.dtype != DataType.int32:
                return False

        # check batch size
        if len(ifm_tensor.shape) > 2 and ifm_tensor.shape[0] != 1:
            return False
        if op.type in self.binary_elem_wise_main_ops:  # if op type is unary, ifm2_tensor is None
            if len(ifm2_tensor.shape) > 2 and ifm2_tensor.shape[0] != 1:
                return False

        # negative alpha values are not supported
        if op.type == "LeakyRelu" and op.attrs["alpha"] < 0:
            return False

        return True

    def check_memory_only_restrictions(self, op):
        if op.type == "StridedSlice":
            # check stride size
            if len(op.inputs) > 3 and any(stride != 1 for stride in op.inputs[3].values):
                return False
            # check "end - begin" doesn't result in any zero or negative elements
            if any((end - begin) <= 0 for begin, end in zip(op.inputs[1].values, op.inputs[2].values)):
                return False
            # check ellipsis_mask
            if op.attrs["ellipsis_mask"] != 0:
                return False
            # check if both new_axis_mask and shrink_axis_mask have bit set
            if op.attrs["new_axis_mask"] != 0 and op.attrs["shrink_axis_mask"] != 0:
                return False
        return True

    def check_quantization_restrictions_binary_elem_wise(self, op):
        # makes sure IFM1, IFM2 and OFM quantization are equal for binary ops
        assert len(op.inputs) >= 2 and len(op.outputs) == 1

        if (
            op.inputs[0].quantization is None
            or not op.inputs[0].quantization.is_scaling_equal(op.inputs[1].quantization)
            or not op.inputs[0].quantization.is_scaling_equal(op.outputs[0].quantization)
        ):
            print(
                "Warning: Input/output tensors with different quantization are unsupported for the", op.type, "operator"
            )
            return False

        return True

    def check_activation_ops(self, op):
        if op.type == "Softmax":
            ifm_tensor = op.inputs[0]
            ofm_tensor = op.outputs[0]

            # check data type
            if ifm_tensor.dtype != ofm_tensor.dtype:
                return False

            if ifm_tensor.dtype not in (DataType.uint8, DataType.int8, DataType.int16):
                return False

            # check batch size
            if len(ifm_tensor.shape) in (2, 4) and ifm_tensor.shape[0] != 1:
                return False

        return True

    def check_bias_restrictions(self, bias_tensor):
        # check data type
        if bias_tensor.dtype not in (DataType.int32, DataType.int64):
            return False

        # check if values fit in 40-bit
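        # (signed 40-bit range: -(1 << 39) .. (1 << 39) - 1, i.e. -549755813888 .. 549755813887)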
        if bias_tensor.dtype == DataType.int64:
            for value in bias_tensor.values:
                if not (-(1 << 39) <= value < (1 << 39)):
                    return False

        return True
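

# Illustrative usage sketch (an assumption about the calling code, not part of this module): a graph
# preparation pass would typically create one SupportedOperators instance and query it per operation,
# falling back to the CPU for anything that is rejected. Rejecting an unlisted operator only needs
# op.type; the generic and per-operator checks above also read op.inputs, op.outputs and op.attrs.
#
#     support = SupportedOperators()
#     for op in graph_operations:          # hypothetical traversal of the network
#         op.run_on_npu = support.is_operator_supported(op)  # hypothetical attribute read by later passes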