# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The SupportedOperators class which is a collection of all supported operators and parameter checks.
from .data_type import BaseType
from .data_type import DataType


class SupportedOperators:
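    """Collection of the operator types the NPU supports, with per-operator parameter checks."""
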
    def __init__(self):
        # Categorised lists of supported operators
        self.npu_pre_ops = set(("QuantizedResizeBilinear", "SplitSliceRead"))
        self.convolution_ops = set(("Conv2DBiasAct", "Conv2D", "QuantizedConv2D"))
        self.depthwise_convolution_ops = set(
            ("DepthwiseConv2dBiasAct", "DepthwiseConv2dNative", "QuantizedDepthwiseConv2D")
        )
        self.transpose_convolution_ops = set(("Conv2DBackpropInput",))
        self.max_pooling_ops = set(("QuantizedMaxPool", "MaxPool", "MaxPoolAct"))
        self.avg_pooling_ops = set(("QuantizedAvgPool", "AvgPool", "AvgPoolAct"))
        self.pooling_ops = self.max_pooling_ops | self.avg_pooling_ops
        self.resizing_ops = set(("ResizeBilinear",))
        self.fc_vector_products = set(("QuantizedMatMul", "MatMul", "FullyConnectedAct"))
        self.mac_main_ops = (
            # convolutions
            self.convolution_ops
            # depth-wise convolutions
            | self.depthwise_convolution_ops
            # transpose convolutions
            | self.transpose_convolution_ops
            # pooling
            | self.pooling_ops
            # resizing/upscaling
            | self.resizing_ops
            # FC layers
            | self.fc_vector_products
            # RNN/LSTM/GRU
            | set(("BlockLSTM",))
        )
        self.unary_elem_wise_main_ops = set(("LeakyRelu", "Abs"))
        self.binary_elem_wise_min_max_ops = set(("Minimum", "Maximum"))
        self.binary_elem_wise_add_mul_sub = set(
            ("AddAct", "MulAct", "SubAct", "QuantizedAdd", "QuantizedSub", "QuantizedMul", "Mul", "Add", "Sub",)
        )
        self.binary_elem_wise_main_ops = self.binary_elem_wise_min_max_ops | self.binary_elem_wise_add_mul_sub
        self.elem_wise_main_ops = self.binary_elem_wise_main_ops | self.unary_elem_wise_main_ops
        self.activation_ops = set(
            ("QuantizedRelu", "QuantizedRelu1", "QuantizedRelu6", "Relu", "Relu6", "ReluN1To1", "Sigmoid", "Tanh")
        )
        self.npu_post_ops = (
            # activation functions
            self.activation_ops
            # concatenation write direction
            | set(("ConcatSliceWrite",))
            # bias add and batch norm
            | set(("QuantizedBiasAdd", "Requantize", "QuantizedBatchNorm", "BiasAdd", "FusedBatchNorm"))
            # Quantization
            | set(("Quantize",))
        )
        self.split_ops = set(("Split", "SplitV", "StridedSlice", "Slice", "UnpackReshaped", "Unpack"))
        self.concat_ops = set(("Concat", "ConcatV2", "QuantizedConcat", "ConcatTFLite", "PackReshaped", "Pack"))
        self.memory_only_ops = (
            set(("Squeeze", "Reshape", "QuantizedReshape", "ExpandDims")) | self.concat_ops | self.split_ops
        )
        self.supported_fused_activations = set(("Relu", "Relu6", "ReluN1To1", "Tanh", "Sigmoid"))
        self.supported_operators = (
            self.npu_pre_ops | self.mac_main_ops | self.elem_wise_main_ops | self.npu_post_ops | self.memory_only_ops
        )
        # Set up supported operator restriction checkers
        self.supported_operator_restrictions = {}
        self.supported_operator_restrictions.update(
            {op: self.check_convolution_restrictions for op in self.convolution_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_depthwise_convolution_restrictions for op in self.depthwise_convolution_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_transpose_convolution_restrictions for op in self.transpose_convolution_ops}
        )
        self.supported_operator_restrictions.update({op: self.check_pooling_restrictions for op in self.pooling_ops})
        self.supported_operator_restrictions.update({op: self.check_resize_restrictions for op in self.resizing_ops})
        self.supported_operator_restrictions.update(
            {op: self.check_vector_product_restrictions for op in self.fc_vector_products}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_element_wise_restrictions for op in self.elem_wise_main_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_memory_only_restrictions for op in self.memory_only_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_quantization_restrictions for op in self.binary_elem_wise_min_max_ops}
        )
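        # operator types without an entry here only need to pass the generic restrictions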

    def is_operator_supported(self, op):
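        """Return True if the operation can be placed on the NPU, False if it must stay on the CPU.

        An operator is supported when its type is in the supported set, it passes the generic
        restrictions and, if a type-specific restriction checker is registered, that check as well.
        """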
        if op.type not in self.supported_operators:
            return False
        if not self.check_generic_restrictions(op):
            return False
        if op.type in self.supported_operator_restrictions:
            return self.supported_operator_restrictions[op.type](op)
        return True

    def check_generic_restrictions(self, op):
        # check fully defined shapes
        for t in op.inputs + op.outputs:
            if not t.has_fully_defined_shape():
                print("Warning:", op, "has inputs/outputs of undefined shape, placing on CPU")
                return False

        # check data type
        tensors = [t for t in op.get_ifm_ifm2_weights_ofm() if t is not None]
        if not tensors:
            tensors = op.inputs
        for t in tensors:
            if not (t.dtype.type & BaseType.Int):
                return False
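            # tensors wider than 16 bit are only accepted for Requantize and the add/mul/sub element-wise ops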
            if t.element_size() > 2 and op.type not in set(("Requantize",)) | self.binary_elem_wise_add_mul_sub:
                return False
            # check size
            if any(dim > 65536 for dim in t.shape):
                return False

        # check fused activations
        if (
            "fused_activation_function" in op.attrs
            and op.attrs["fused_activation_function"] is not None
            and op.attrs["fused_activation_function"] not in self.supported_fused_activations
        ):
            return False
        return True

    def check_convolution_restrictions(self, op):
        # check stride
        if op.attrs["stride_w"] > 3 or op.attrs["stride_h"] > 3:
            return False

        # check dilation
        dilation_w_factor = op.attrs.get("dilation_w_factor", 1)
        dilation_h_factor = op.attrs.get("dilation_h_factor", 1)
        if dilation_w_factor > 2 or dilation_h_factor > 2:
            return False

        # check data type
        ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()
        if weight_tensor.element_size() > 1:
            return False

        # check kernel size
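        # effective (dilated) kernel extent per axis: k + (k - 1) * (dilation_factor - 1)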
        dilated_weight_w = weight_tensor.shape[0] + (weight_tensor.shape[0] - 1) * (dilation_w_factor - 1)
        dilated_weight_h = weight_tensor.shape[1] + (weight_tensor.shape[1] - 1) * (dilation_h_factor - 1)
        if (
            dilated_weight_w > 64
            or dilated_weight_h > 64
            or dilated_weight_w * dilated_weight_h * weight_tensor.shape[2] > 127 * 65536
        ):
            return False

        # check batch size
        if ifm_tensor.shape[0] != 1:
            return False
        return True

    def check_depthwise_convolution_restrictions(self, op):
        # check depth
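        # a depth multiplier > 1 is only supported when the IFM depth is 1 and the OFM depth equals the multiplier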
        ifm_tensor, _, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        if op.attrs["depth_multiplier"] > 1 and not (
            (ifm_tensor.shape[3] == 1) and (ofm_tensor.shape[3] == op.attrs["depth_multiplier"])
        ):
            return False
        return self.check_convolution_restrictions(op)

    def check_transpose_convolution_restrictions(self, op):
        # check stride
        stride_h, stride_w = op.attrs["stride_h"], op.attrs["stride_w"]
        if stride_h != 2 or stride_w != 2:
            return False

        # check output dimensions
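        # SAME padding: the OFM must be exactly stride x IFM in both spatial dimensions
        # VALID padding: the OFM must be stride x IFM plus any kernel extent beyond the stride, max(kernel - stride, 0)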
        ifm_tensor, weight_tensor, _, ofm_tensor = op.get_ifm_weights_biases_ofm()
        ifm_h, ifm_w = ifm_tensor.shape[1], ifm_tensor.shape[2]
        ofm_h, ofm_w = ofm_tensor.shape[1], ofm_tensor.shape[2]
        if op.attrs["padding"] == b"SAME":
            if (ofm_h != ifm_h * stride_h) or (ofm_w != ifm_w * stride_w):
                return False
        elif op.attrs["padding"] == b"VALID":
            kernel_h, kernel_w = weight_tensor.shape[0], weight_tensor.shape[1]
            if (ofm_h != ifm_h * stride_h + max(kernel_h - stride_h, 0)) or (
                ofm_w != ifm_w * stride_w + max(kernel_w - stride_w, 0)
            ):
                return False

        return self.check_convolution_restrictions(op)

    def check_pooling_restrictions(self, op):
        # check stride
        if op.attrs["stride_w"] > 3 or op.attrs["stride_h"] > 3:
            return False

        # check data type
        ifm_tensor, _, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        if ifm_tensor.dtype != ofm_tensor.dtype:
            return False

        # check batch size
        if ifm_tensor.shape[0] != 1:
            return False

        if op.type in self.avg_pooling_ops:
            # check kernel size
            if op.attrs["padding"] == b"SAME" and (op.attrs["filter_width"] > 8 or op.attrs["filter_height"] > 8):
                return False
            if op.attrs["padding"] == b"VALID" and (
                op.attrs["filter_width"] * op.attrs["filter_height"] > 256 * 256 or op.attrs["filter_height"] > 256
            ):
                return False

        if op.type in self.max_pooling_ops:
            # check kernel size (any padding)
            if op.attrs["filter_width"] * op.attrs["filter_height"] > 256 * 256 or op.attrs["filter_height"] > 256:
                return False
        return True

    def check_resize_restrictions(self, op):
        # check unsupported upscaling factor
        if op.type == "ResizeBilinear":
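            # resizing from a 1x1 IFM is always accepted; otherwise only an exact 2x upscale
            # is supported (2x - 1 per dimension when align_corners is set)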
            if op.inputs[0].shape[1] == 1 and op.inputs[0].shape[2] == 1:
                return True
            upscaled_shape = [op.inputs[0].shape[1] * 2, op.inputs[0].shape[2] * 2]
            out_shape = op.outputs[0].shape[1:3]
            if not op.attrs["align_corners"] and out_shape != upscaled_shape:
                return False
            elif op.attrs["align_corners"] and out_shape != [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
                return False
        return True

    def check_vector_product_restrictions(self, op):
        # check data type
        ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()
        if weight_tensor.element_size() > 1:
            return False

        return True

    def check_element_wise_restrictions(self, op):
        # check data type
        ifm_tensor, ifm2_tensor, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        # input and output datatype must match for these operators
        if (
            op.type in self.binary_elem_wise_min_max_ops | self.unary_elem_wise_main_ops
            and ifm_tensor.dtype != ofm_tensor.dtype
        ):
            return False
        if op.type in self.binary_elem_wise_add_mul_sub:
            # both inputs must have same type
            if ifm_tensor.dtype != ifm2_tensor.dtype:
                return False
            # signed input check
            if ifm_tensor.dtype.type & BaseType.Signed:
                # output must be signed
                if ofm_tensor.dtype.type & BaseType.Unsigned:
                    return False
                # and 8, 16 or 32-bit
                if ofm_tensor.element_size() not in (1, 2, 4):
                    return False
            # unsigned input check, output must be same type or int32
            if ifm_tensor.dtype.type & BaseType.Unsigned and not (
                ifm_tensor.dtype == ofm_tensor.dtype or ofm_tensor.dtype == DataType.int32
            ):
                return False

        # check batch size
        if len(ifm_tensor.shape) > 2 and ifm_tensor.shape[0] != 1:
            return False
        if op.type in self.binary_elem_wise_main_ops:  # if op type is unary, ifm2_tensor is None
            if len(ifm2_tensor.shape) > 2 and ifm2_tensor.shape[0] != 1:
                return False

        # negative alpha values are not supported
        if op.type == "LeakyRelu" and op.attrs["alpha"] < 0:
            return False

        return True

    def check_memory_only_restrictions(self, op):
        if op.type == "StridedSlice":
            # check stride size
            if len(op.inputs) > 3 and any(stride != 1 for stride in op.inputs[3].values):
                return False
            # check ellipsis_mask
            if op.attrs["ellipsis_mask"] != 0:
                return False
            # check if both new_axis_mask and shrink_axis_mask have bit set
            if op.attrs["new_axis_mask"] != 0 and op.attrs["shrink_axis_mask"] != 0:
                return False
        return True

    def check_quantization_restrictions(self, op):
        # makes sure IFM1, IFM2 and OFM quantization are equal for binary ops
        if (
            len(op.inputs) == 2
            and not op.inputs[0].quantization == op.inputs[1].quantization == op.outputs[0].quantization
        ):
            print(
                "Warning: Input/output tensors with differing quantization are unsupported for the",
                op.type,
                "operator",
            )
            return False
        return True
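

# Illustrative usage sketch (not part of this module's API). `candidate_ops` is a hypothetical
# iterable of operation objects from the surrounding compiler; they must provide the `type`,
# `attrs`, `inputs`/`outputs` and get_ifm_* interfaces used by the checks above:
#
#     supported_operators = SupportedOperators()
#     npu_ops = [op for op in candidate_ops if supported_operators.is_operator_supported(op)]
#     cpu_ops = [op for op in candidate_ops if not supported_operators.is_operator_supported(op)]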