# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The SupportedOperators class which is a collection of all supported operators and parameter checks.
from .data_type import BaseType
from .data_type import DataType


class SupportedOperators:
    def __init__(self, softmax_support):
        self.softmax_support = softmax_support
        # Categorised lists of supported operators
        self.npu_pre_ops = set(("QuantizedResizeBilinear", "SplitSliceRead",))
        self.convolution_ops = set(("Conv2DBiasAct", "Conv2D", "QuantizedConv2D",))
        self.depthwise_convolution_ops = set(
            ("DepthwiseConv2dBiasAct", "DepthwiseConv2dNative", "QuantizedDepthwiseConv2D",)
        )
        self.transpose_convolution_ops = set(("Conv2DBackpropInput",))
        self.max_pooling_ops = set(("QuantizedMaxPool", "MaxPool", "MaxPoolAct",))
        self.avg_pooling_ops = set(("QuantizedAvgPool", "AvgPool", "AvgPoolAct",))
        self.pooling_ops = set(("ReduceSum",)) | self.max_pooling_ops | self.avg_pooling_ops
        self.resizing_ops = set(("ResizeBilinear",))
        self.fc_vector_products = set(("QuantizedMatMul", "MatMul", "FullyConnectedAct",))
        self.mac_main_ops = (
            # convolutions
            self.convolution_ops
            # depth-wise convolutions
            | self.depthwise_convolution_ops
            # transpose convolutions
            | self.transpose_convolution_ops
            # pooling
            | self.pooling_ops
            # resizing/upscaling
            | self.resizing_ops
            # FC layers
            | self.fc_vector_products
            # RNN/LSTM/GRU
            | set(("BlockLSTM",))
        )
        self.unary_elem_wise_main_ops = set(("LeakyRelu", "Abs", "CLZ",))
        self.binary_elem_wise_min_max_ops = set(("Minimum", "Maximum",))
        self.binary_elem_wise_add_mul_sub = set(
            (
                "AddAct",
                "MulAct",
                "SubAct",
                "QuantizedAdd",
                "QuantizedSub",
                "QuantizedMul",
                "Mul",
                "Add",
                "Sub",
                "SHL",
                "SHR",
            )
        )
        self.binary_elem_wise_main_ops = self.binary_elem_wise_min_max_ops | self.binary_elem_wise_add_mul_sub
        self.elem_wise_main_ops = self.binary_elem_wise_main_ops | self.unary_elem_wise_main_ops
        self.activation_ops = set(
            (
                "QuantizedRelu",
                "QuantizedRelu1",
                "QuantizedRelu6",
                "Relu",
                "Relu6",
                "ReluN1To1",
                "Sigmoid",
                "Tanh",
                "Softmax",
            )
        )
        self.npu_post_ops = (
            # activation functions
            self.activation_ops
            # concatenation write direction
            | set(("ConcatSliceWrite",))
            # bias add and batch norm
            | set(("QuantizedBiasAdd", "Requantize", "QuantizedBatchNorm", "BiasAdd", "FusedBatchNorm",))
            # quantization
            | set(("Quantize",))
        )
        self.split_ops = set(("Split", "SplitV", "StridedSlice", "Slice", "UnpackReshaped", "Unpack",))
        self.concat_ops = set(("Concat", "ConcatV2", "QuantizedConcat", "ConcatTFLite", "PackReshaped", "Pack",))
        self.memory_only_ops = (
            set(("Squeeze", "Reshape", "QuantizedReshape", "ExpandDims",)) | self.concat_ops | self.split_ops
        )
        self.supported_fused_activations = set(("Relu", "Relu6", "ReluN1To1", "Tanh", "Sigmoid", "LUT",))
        self.supported_operators = (
            self.npu_pre_ops | self.mac_main_ops | self.elem_wise_main_ops | self.npu_post_ops | self.memory_only_ops
        )
        # Setup supported operator restriction checkers
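        # Each entry maps an op type to the specific restriction check that
        # is_operator_supported() runs after the generic checks; op types without
        # an entry only get the generic checks.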
        self.supported_operator_restrictions = {}
        self.supported_operator_restrictions.update(
            {op: self.check_convolution_restrictions for op in self.convolution_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_depthwise_convolution_restrictions for op in self.depthwise_convolution_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_transpose_convolution_restrictions for op in self.transpose_convolution_ops}
        )
        self.supported_operator_restrictions.update({op: self.check_pooling_restrictions for op in self.pooling_ops})
        self.supported_operator_restrictions.update({op: self.check_resize_restrictions for op in self.resizing_ops})
        self.supported_operator_restrictions.update(
            {op: self.check_vector_product_restrictions for op in self.fc_vector_products}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_element_wise_restrictions for op in self.elem_wise_main_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_memory_only_restrictions for op in self.memory_only_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_quantization_restrictions for op in self.binary_elem_wise_min_max_ops}
        )
        self.supported_operator_restrictions.update({op: self.check_activation_ops for op in self.activation_ops})

    def is_operator_supported(self, op):
        if op.type not in self.supported_operators:
            return False
        if not self.check_generic_restrictions(op):
            return False
        if op.type in self.supported_operator_restrictions:
            return self.supported_operator_restrictions[op.type](op)
        return True

    def check_generic_restrictions(self, op):
        # check fully defined shapes
        for t in op.inputs + op.outputs:
            if not t.has_fully_defined_shape():
                print("Warning:", op, "has inputs/outputs of undefined shape, placing on CPU")
                return False

        # check data type
        tensors = [t for t in op.get_ifm_ifm2_weights_ofm() if t is not None]
        if not tensors:
            tensors = op.inputs
        for t in tensors:
            if not (t.dtype.type & BaseType.Int):
                return False
            if (
                t.element_size() > 2
                and op.type not in set(("Requantize", "ReduceSum", "CLZ",)) | self.binary_elem_wise_add_mul_sub
            ):
                return False
            # check size
            if any(dim > 65536 for dim in t.shape):
                return False

        # check fused activations
        if (
            "fused_activation_function" in op.attrs
            and op.attrs["fused_activation_function"] is not None
            and op.attrs["fused_activation_function"] not in self.supported_fused_activations
        ):
            return False
        return True

    def check_convolution_restrictions(self, op):
        # check stride
        if op.attrs["stride_w"] > 3 or op.attrs["stride_h"] > 3:
            return False

        # check dilation
        dilation_w_factor = op.attrs.get("dilation_w_factor", 1)
        dilation_h_factor = op.attrs.get("dilation_h_factor", 1)
        if dilation_w_factor > 2 or dilation_h_factor > 2:
            return False

        # check data type
        ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()
        if weight_tensor.element_size() > 1:
            return False

        # check kernel size
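        # effective kernel extent once dilation is applied: k + (k - 1) * (dilation - 1)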
        dilated_weight_w = weight_tensor.shape[0] + (weight_tensor.shape[0] - 1) * (dilation_w_factor - 1)
        dilated_weight_h = weight_tensor.shape[1] + (weight_tensor.shape[1] - 1) * (dilation_h_factor - 1)
        if (
            dilated_weight_w > 64
            or dilated_weight_h > 64
            or dilated_weight_w * dilated_weight_h * weight_tensor.shape[2] > 127 * 65536
        ):
            return False

        # check batch size
        if ifm_tensor.shape[0] != 1:
            return False
        return True

    def check_depthwise_convolution_restrictions(self, op):
        # check depth
        ifm_tensor, _, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        if op.attrs["depth_multiplier"] > 1 and not (
            (ifm_tensor.shape[3] == 1) and (ofm_tensor.shape[3] == op.attrs["depth_multiplier"])
        ):
            return False
        return self.check_convolution_restrictions(op)

    def check_transpose_convolution_restrictions(self, op):
        # check stride, only 2x2 is supported
        stride_h, stride_w = op.attrs["stride_h"], op.attrs["stride_w"]
        if stride_h != 2 or stride_w != 2:
            return False

        # check output dimensions
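        # expected OFM size: ifm * stride for SAME padding; ifm * stride + max(kernel - stride, 0) for VALID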
        ifm_tensor, weight_tensor, _, ofm_tensor = op.get_ifm_weights_biases_ofm()
        ifm_h, ifm_w = ifm_tensor.shape[1], ifm_tensor.shape[2]
        ofm_h, ofm_w = ofm_tensor.shape[1], ofm_tensor.shape[2]
        if op.attrs["padding"] == b"SAME":
            if (ofm_h != ifm_h * stride_h) or (ofm_w != ifm_w * stride_w):
                return False
        elif op.attrs["padding"] == b"VALID":
            kernel_h, kernel_w = weight_tensor.shape[0], weight_tensor.shape[1]
            if (ofm_h != ifm_h * stride_h + max(kernel_h - stride_h, 0)) or (
                ofm_w != ifm_w * stride_w + max(kernel_w - stride_w, 0)
            ):
                return False

        return self.check_convolution_restrictions(op)

    def check_pooling_restrictions(self, op):
        # check stride
        if op.attrs["stride_w"] > 3 or op.attrs["stride_h"] > 3:
            return False

        # check data type
        ifm_tensor, _, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        if ifm_tensor.dtype != ofm_tensor.dtype:
            if op.type != "ReduceSum":
                return False
            # TODO: else check ReduceSum restrictions.

        # check batch size
        if ifm_tensor.shape[0] != 1:
            return False

        if op.type in self.avg_pooling_ops:
            # check kernel size
            if op.attrs["padding"] == b"SAME" and (op.attrs["filter_width"] > 8 or op.attrs["filter_height"] > 8):
                return False
            if op.attrs["padding"] == b"VALID" and (
                op.attrs["filter_width"] * op.attrs["filter_height"] > 256 * 256 or op.attrs["filter_height"] > 256
            ):
                return False

        if op.type in self.max_pooling_ops:
            # check kernel size (any padding)
            if op.attrs["filter_width"] * op.attrs["filter_height"] > 256 * 256 or op.attrs["filter_height"] > 256:
                return False
        return True

    def check_resize_restrictions(self, op):
        # check unsupported upscaling factor
        if op.type == "ResizeBilinear":
            if op.inputs[0].shape[1] == 1 and op.inputs[0].shape[2] == 1:
                return True
            if op.inputs[0].shape == op.outputs[0].shape:
                return True
            upscaled_shape = [op.inputs[0].shape[1] * 2, op.inputs[0].shape[2] * 2]
            out_shape = op.outputs[0].shape[1:3]
            if not op.attrs["align_corners"] and out_shape != upscaled_shape:
                return False
            elif op.attrs["align_corners"] and out_shape != [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
                return False
        return True

    def check_vector_product_restrictions(self, op):
        # check data type
        ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()
        if weight_tensor.element_size() > 1:
            return False

        return True

    def check_element_wise_restrictions(self, op):
        # check data type
        ifm_tensor, ifm2_tensor, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        # input and output datatype must match for these operators
        if (
            op.type in self.binary_elem_wise_min_max_ops | self.unary_elem_wise_main_ops
            and ifm_tensor.dtype != ofm_tensor.dtype
        ):
            return False
        if op.type in self.binary_elem_wise_add_mul_sub:
            # both inputs must have same type
            if ifm_tensor.dtype != ifm2_tensor.dtype:
                return False
            # signed input check
            if ifm_tensor.dtype.type & BaseType.Signed:
                # output must be signed
                if ofm_tensor.dtype.type & BaseType.Unsigned:
                    return False
                # and 8, 16 or 32-bit
                if ofm_tensor.element_size() not in (1, 2, 4):
                    return False
            # unsigned input check, output must be same type or int32
            if ifm_tensor.dtype.type & BaseType.Unsigned and not (
                ifm_tensor.dtype == ofm_tensor.dtype or ofm_tensor.dtype == DataType.int32
            ):
                return False

        # check batch size
        if len(ifm_tensor.shape) > 2 and ifm_tensor.shape[0] != 1:
            return False
        if op.type in self.binary_elem_wise_main_ops:  # if op type is unary, ifm2_tensor is None
            if len(ifm2_tensor.shape) > 2 and ifm2_tensor.shape[0] != 1:
                return False

        # negative alpha values are not supported
        if op.type == "LeakyRelu" and op.attrs["alpha"] < 0:
            return False

        return True

    def check_memory_only_restrictions(self, op):
        if op.type == "StridedSlice":
            # check stride size
            if len(op.inputs) > 3 and any(stride != 1 for stride in op.inputs[3].values):
                return False
            # check ellipsis_mask
            if op.attrs["ellipsis_mask"] != 0:
                return False
            # check if both new_axis_mask and shrink_axis_mask have bit set
            if op.attrs["new_axis_mask"] != 0 and op.attrs["shrink_axis_mask"] != 0:
                return False
        return True

    def check_quantization_restrictions(self, op):
        # makes sure IFM1, IFM2 and OFM quantization are equal for binary ops
        if (
            len(op.inputs) == 2
            and not op.inputs[0].quantization == op.inputs[1].quantization == op.outputs[0].quantization
        ):
            print(
                "Warning: Input/output tensors with different quantization are unsupported for the", op.type, "operator"
            )
            return False
        return True

    def check_activation_ops(self, op):
        if op.type == "Softmax":
            if not self.softmax_support:
                return False

            ifm_tensor = op.inputs[0]
            ofm_tensor = op.outputs[0]

            # check data type
            if ifm_tensor.dtype != ofm_tensor.dtype:
                return False

            if ifm_tensor.dtype != DataType.int16:
                return False  # TODO: Implement support for 8-bit Softmax

            # check batch size
            if len(ifm_tensor.shape) in (2, 4) and ifm_tensor.shape[0] != 1:
                return False

        return True
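

# A minimal usage sketch. "graph_ops" stands in for an iterable of the compiler's
# Operation objects and is not defined in this module; only SupportedOperators and
# its check methods live here.
#
#     supported = SupportedOperators(softmax_support=True)
#     npu_ops = [op for op in graph_ops if supported.is_operator_supported(op)]
#
# Operators outside supported_operators, or failing the generic or per-operator
# checks, are left to run on the CPU (see the warning in check_generic_restrictions).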