# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The SupportedOperators class which is a collection of all supported operators and parameter checks.
from .data_type import BaseType, DataType


class SupportedOperators:
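    """Collection of all supported operators and the parameter checks used to decide NPU placement."""
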
    def __init__(self):
        # Categorised lists of supported operators
        self.npu_pre_ops = set(("QuantizedResizeBilinear", "SplitSliceRead"))
        self.convolution_ops = set(("Conv2DBiasAct", "Conv2D", "QuantizedConv2D"))
        self.depthwise_convolution_ops = set(
            ("DepthwiseConv2dBiasAct", "DepthwiseConv2dNative", "QuantizedDepthwiseConv2D")
        )
        self.transpose_convolution_ops = set(("Conv2DBackpropInput",))
        self.max_pooling_ops = set(("QuantizedMaxPool", "MaxPool", "MaxPoolAct"))
        self.avg_pooling_ops = set(("QuantizedAvgPool", "AvgPool", "AvgPoolAct"))
        self.pooling_ops = self.max_pooling_ops | self.avg_pooling_ops
        self.resizing_ops = set(("ResizeBilinear",))
        self.fc_vector_products = set(("QuantizedMatMul", "MatMul", "FullyConnectedAct"))
        self.mac_main_ops = (
            # convolutions
            self.convolution_ops
            # depth-wise convolutions
            | self.depthwise_convolution_ops
            # transpose convolutions
            | self.transpose_convolution_ops
            # pooling
            | self.pooling_ops
            # resizing/upscaling
            | self.resizing_ops
            # FC layers
            | self.fc_vector_products
            # RNN/LSTM/GRU
49 | set(("BlockLSTM"))
        )
        self.unary_elem_wise_main_ops = set(("LeakyRelu", "Abs"))
        self.binary_elem_wise_min_max_ops = set(("Minimum", "Maximum"))
        self.binary_elem_wise_add_mul_sub = set(
            (
                "AddAct",
                "MulAct",
                "SubAct",
                "QuantizedAdd",
                "QuantizedSub",
                "QuantizedMul",
                "Mul",
                "Add",
                "Sub",
            )
        )
        self.binary_elem_wise_main_ops = self.binary_elem_wise_min_max_ops | self.binary_elem_wise_add_mul_sub
        self.elem_wise_main_ops = self.binary_elem_wise_main_ops | self.unary_elem_wise_main_ops
        self.activation_ops = set(
            ("QuantizedRelu", "QuantizedRelu1", "QuantizedRelu6", "Relu", "Relu6", "ReluN1To1", "Sigmoid", "Tanh")
        )
        self.npu_post_ops = (
            # activation functions
            self.activation_ops
            # concatenation write direction
75 | set(("ConcatSliceWrite"))
            # bias add and batch norm
            | set(("QuantizedBiasAdd", "Requantize", "QuantizedBatchNorm", "BiasAdd", "FusedBatchNorm"))
        )
        self.split_ops = set(("Split", "SplitV", "StridedSlice", "Slice", "UnpackReshaped", "Unpack"))
        self.concat_ops = set(("Concat", "ConcatV2", "QuantizedConcat", "ConcatTFLite", "PackReshaped", "Pack"))
        self.memory_only_ops = (
            set(("Squeeze", "Reshape", "QuantizedReshape", "ExpandDims")) | self.concat_ops | self.split_ops
        )
        self.supported_fused_activations = set(("Relu", "Relu6", "ReluN1To1", "Tanh", "Sigmoid"))
        self.supported_operators = (
            self.npu_pre_ops | self.mac_main_ops | self.elem_wise_main_ops | self.npu_post_ops | self.memory_only_ops
        )
        # Setup supported operator restriction checkers
        self.supported_operator_restrictions = {}
        self.supported_operator_restrictions.update(
            {op: self.check_convolution_restrictions for op in self.convolution_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_depthwise_convolution_restrictions for op in self.depthwise_convolution_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_transpose_convolution_restrictions for op in self.transpose_convolution_ops}
        )
        self.supported_operator_restrictions.update({op: self.check_pooling_restrictions for op in self.pooling_ops})
        self.supported_operator_restrictions.update({op: self.check_resize_restrictions for op in self.resizing_ops})
        self.supported_operator_restrictions.update(
            {op: self.check_vector_product_restrictions for op in self.fc_vector_products}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_element_wise_restrictions for op in self.elem_wise_main_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_memory_only_restrictions for op in self.memory_only_ops}
        )

    def is_operator_supported(self, op):
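        """Return True if op is a supported type and passes the generic and type-specific restriction checks."""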
        if op.type not in self.supported_operators:
            return False
        if not self.check_generic_restrictions(op):
            return False
        if op.type in self.supported_operator_restrictions:
            return self.supported_operator_restrictions[op.type](op)
        return True

    def check_generic_restrictions(self, op):
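        """Check restrictions common to all operators: fully defined shapes, integer data types,
        maximum tensor dimensions and supported fused activation functions."""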
        # check fully defined shapes
        for t in op.inputs + op.outputs:
            if not t.has_fully_defined_shape():
                print("Warning:", op, "has inputs/outputs of undefined shape, placing on CPU")
                return False

        # check data type
        tensors = [t for t in op.get_ifm_ifm2_weights_ofm() if t is not None]
        if not tensors:
            tensors = op.inputs
        for t in tensors:
            if not (t.dtype.type & BaseType.Int):
                return False
            if t.element_size() > 2 and op.type not in set(("Requantize",)) | self.binary_elem_wise_add_mul_sub:
                return False
            # check size
            if any(dim > 65536 for dim in t.shape):
                return False

        # check fused activations
        if (
            "fused_activation_function" in op.attrs
            and op.attrs["fused_activation_function"] is not None
            and op.attrs["fused_activation_function"] not in self.supported_fused_activations
        ):
            return False
        return True

    def check_convolution_restrictions(self, op):
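        """Check convolution restrictions: stride, dilation, weight data type, dilated kernel size and batch size."""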
        # check stride
        if op.attrs["stride_w"] > 3 or op.attrs["stride_h"] > 3:
            return False

        # check dilation
        dilation_w_factor = op.attrs.get("dilation_w_factor", 1)
        dilation_h_factor = op.attrs.get("dilation_h_factor", 1)
        if dilation_w_factor > 2 or dilation_h_factor > 2:
            return False

        # check data type
        ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()
        if weight_tensor.element_size() > 1:
            return False

        # check kernel size
        dilated_weight_w = weight_tensor.shape[0] + (weight_tensor.shape[0] - 1) * (dilation_w_factor - 1)
        dilated_weight_h = weight_tensor.shape[1] + (weight_tensor.shape[1] - 1) * (dilation_h_factor - 1)
        if (
            dilated_weight_w > 64
            or dilated_weight_h > 64
            or dilated_weight_w * dilated_weight_h * weight_tensor.shape[2] > 127 * 65536
        ):
            return False

        # check batch size
        if ifm_tensor.shape[0] != 1:
            return False
        return True

    def check_depthwise_convolution_restrictions(self, op):
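        """Check depth multiplier support, then apply the common convolution restrictions."""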
        # check depth
        ifm_tensor, _, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        if op.attrs["depth_multiplier"] > 1 and not (
            (ifm_tensor.shape[3] == 1) and (ofm_tensor.shape[3] == op.attrs["depth_multiplier"])
        ):
            return False
        return self.check_convolution_restrictions(op)

    def check_transpose_convolution_restrictions(self, op):
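        """Check transpose convolution stride and output dimensions, then the common convolution restrictions."""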
        # check stride
        stride_h, stride_w = op.attrs["stride_h"], op.attrs["stride_w"]
        if stride_h != 2 or stride_w != 2:
            return False

        # check output dimensions
        ifm_tensor, weight_tensor, _, ofm_tensor = op.get_ifm_weights_biases_ofm()
        ifm_h, ifm_w = ifm_tensor.shape[1], ifm_tensor.shape[2]
        ofm_h, ofm_w = ofm_tensor.shape[1], ofm_tensor.shape[2]
        if op.attrs["padding"] == b"SAME":
            if (ofm_h != ifm_h * stride_h) or (ofm_w != ifm_w * stride_w):
                return False
        elif op.attrs["padding"] == b"VALID":
            kernel_h, kernel_w = weight_tensor.shape[0], weight_tensor.shape[1]
            if (ofm_h != ifm_h * stride_h + max(kernel_h - stride_h, 0)) or (
                ofm_w != ifm_w * stride_w + max(kernel_w - stride_w, 0)
            ):
                return False

        return self.check_convolution_restrictions(op)

    def check_pooling_restrictions(self, op):
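        """Check pooling restrictions: stride, matching input/output data types, batch size and kernel size."""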
        # check stride
        if op.attrs["stride_w"] > 3 or op.attrs["stride_h"] > 3:
            return False

        # check data type
        ifm_tensor, _, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        if ifm_tensor.dtype != ofm_tensor.dtype:
            return False

        # check batch size
        if ifm_tensor.shape[0] != 1:
            return False

        if op.type in self.avg_pooling_ops:
            # check kernel size
            if op.attrs["padding"] == b"SAME" and (op.attrs["filter_width"] > 8 or op.attrs["filter_height"] > 8):
                return False
            if op.attrs["padding"] == b"VALID" and (
                op.attrs["filter_width"] * op.attrs["filter_height"] > 256 * 256 or op.attrs["filter_height"] > 256
            ):
                return False

        if op.type in self.max_pooling_ops:
            # check kernel size (any padding)
            if op.attrs["filter_width"] * op.attrs["filter_height"] > 256 * 256 or op.attrs["filter_height"] > 256:
                return False
        return True

    def check_resize_restrictions(self, op):
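        """Check that ResizeBilinear only performs a 2x upscale, with or without align_corners."""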
        # check unsupported upscaling factor
        if op.type == "ResizeBilinear":
            upscaled_shape = [op.inputs[0].shape[1] * 2, op.inputs[0].shape[2] * 2]
            out_shape = op.outputs[0].shape[1:3]
            if not op.attrs["align_corners"] and out_shape != upscaled_shape:
                return False
            elif op.attrs["align_corners"] and out_shape != [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
                return False
        return True

    def check_vector_product_restrictions(self, op):
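        """Check fully connected/matmul restrictions: weights must be 8-bit."""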
        # check data type
        ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()
        if weight_tensor.element_size() > 1:
            return False

        return True

    def check_element_wise_restrictions(self, op):
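        """Check element-wise restrictions: data type combinations of inputs and output, and batch size."""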
        # check data type
        ifm_tensor, ifm2_tensor, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        # input and output datatype must match for these operators
        if (
            op.type in self.binary_elem_wise_min_max_ops | self.unary_elem_wise_main_ops
            and ifm_tensor.dtype != ofm_tensor.dtype
        ):
            return False
        if op.type in self.binary_elem_wise_add_mul_sub:
            # both inputs must have same type
            if ifm_tensor.dtype != ifm2_tensor.dtype:
                return False
            # signed input check
            if ifm_tensor.dtype.type & BaseType.Signed:
                # output must be signed
                if ofm_tensor.dtype.type & BaseType.Unsigned:
                    return False
                # and 8, 16 or 32-bit
                if ofm_tensor.element_size() not in (1, 2, 4):
                    return False
            # unsigned input check, output must be same type or int32
            if ifm_tensor.dtype.type & BaseType.Unsigned and not (
                ifm_tensor.dtype == ofm_tensor.dtype or ofm_tensor.dtype == DataType.int32
            ):
                return False

        # check batch size
        if len(ifm_tensor.shape) > 2 and ifm_tensor.shape[0] != 1:
            return False
        if op.type in self.binary_elem_wise_main_ops:  # if op type is unary, ifm2_tensor is None
            if len(ifm2_tensor.shape) > 2 and ifm2_tensor.shape[0] != 1:
                return False
        return True

    def check_memory_only_restrictions(self, op):
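        """Check StridedSlice restrictions: unit strides, no ellipsis_mask, not both new_axis and shrink_axis masks."""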
        if op.type == "StridedSlice":
            # check stride size
            if len(op.inputs) > 3 and any(stride != 1 for stride in op.inputs[3].values):
                return False
            # check ellipsis_mask
            if op.attrs["ellipsis_mask"] != 0:
                return False
            # check if both new_axis_mask and shrink_axis_mask have bit set
            if op.attrs["new_axis_mask"] != 0 and op.attrs["shrink_axis_mask"] != 0:
                return False
        return True