# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# The SupportedOperators class which is a collection of all supported operators and parameter checks.
from .data_type import BaseType, DataType


class SupportedOperators:
    def __init__(self):
        # Categorised lists of supported operators
        self.npu_pre_ops = set(("QuantizedResizeBilinear", "SplitSliceRead"))
        self.convolution_ops = set(("Conv2DBiasAct", "Conv2D", "QuantizedConv2D", "Conv2DBackpropInputSwitched"))
        self.depthwise_convolution_ops = set(
            ("DepthwiseConv2dBiasAct", "DepthwiseConv2dNative", "QuantizedDepthwiseConv2D")
        )
        self.max_pooling_ops = set(("QuantizedMaxPool", "MaxPool", "MaxPoolAct"))
        self.avg_pooling_ops = set(("QuantizedAvgPool", "AvgPool", "AvgPoolAct"))
        self.pooling_ops = self.max_pooling_ops | self.avg_pooling_ops
        self.fc_vector_products = set(("QuantizedMatMul", "MatMul", "FullyConnectedAct"))
        self.mac_main_ops = (
            # convolutions
            self.convolution_ops
            # depth-wise convolutions
            | self.depthwise_convolution_ops
            # pooling
            | self.pooling_ops
            # FC layers
            | self.fc_vector_products
            # RNN/LSTM/GRU
            | set(("BlockLSTM",))
            # deconvolution
            | set(("ResizeBilinear",))
        )
        self.unary_elem_wise_main_ops = set(("LeakyRelu", "Abs"))
        self.binary_elem_wise_min_max_ops = set(("Minimum", "Maximum"))
        self.binary_elem_wise_add_mul_sub = set(
            (
                "AddAct",
                "MulAct",
                "SubAct",
                "QuantizedAdd",
                "QuantizedSub",
                "QuantizedMul",
                "Mul",
                "Add",
                "Sub",
            )
        )
        self.binary_elem_wise_main_ops = self.binary_elem_wise_min_max_ops | self.binary_elem_wise_add_mul_sub
        self.elem_wise_main_ops = self.binary_elem_wise_main_ops | self.unary_elem_wise_main_ops
        self.activation_ops = set(
            ("QuantizedRelu", "QuantizedRelu1", "QuantizedRelu6", "Relu", "Relu6", "ReluN1To1", "Sigmoid", "Tanh")
        )
        self.npu_post_ops = (
            # activation functions
            self.activation_ops
            # concatenation write direction
            | set(("ConcatSliceWrite",))
            # bias add and batch norm
            | set(("QuantizedBiasAdd", "Requantize", "QuantizedBatchNorm", "BiasAdd", "FusedBatchNorm"))
        )
        self.split_ops = set(("Split", "SplitV", "StridedSlice", "Slice", "UnpackReshaped", "Unpack"))
        self.concat_ops = set(("Concat", "ConcatV2", "QuantizedConcat", "ConcatTFLite", "PackReshaped", "Pack"))
        self.memory_only_ops = (
            set(("Squeeze", "Reshape", "QuantizedReshape", "ExpandDims")) | self.concat_ops | self.split_ops
        )
        self.supported_fused_activations = set(("Relu", "Relu6", "ReluN1To1", "Tanh", "Sigmoid"))
        self.supported_operators = (
            self.npu_pre_ops | self.mac_main_ops | self.elem_wise_main_ops | self.npu_post_ops | self.memory_only_ops
        )
        # Setup supported operator restriction checkers
        self.supported_operator_restrictions = {}
        self.supported_operator_restrictions.update(
            {op: self.check_convolution_restrictions for op in self.convolution_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_depthwise_convolution_restrictions for op in self.depthwise_convolution_ops}
        )
        self.supported_operator_restrictions.update({op: self.check_pooling_restrictions for op in self.pooling_ops})
        self.supported_operator_restrictions.update(
            {op: self.check_vector_product_restrictions for op in self.fc_vector_products}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_element_wise_restrictions for op in self.elem_wise_main_ops}
        )
        self.supported_operator_restrictions.update(
            {op: self.check_memory_only_restrictions for op in self.memory_only_ops}
        )

    def is_operator_supported(self, op):
        if op.type not in self.supported_operators:
            return False
        if not self.check_generic_restrictions(op):
            return False
        if op.type in self.supported_operator_restrictions:
            return self.supported_operator_restrictions[op.type](op)
        return True

    def check_generic_restrictions(self, op):
        # check fully defined shapes
        for t in op.inputs + op.outputs:
            if not t.has_fully_defined_shape():
                print("Warning:", op, "has inputs/outputs of undefined shape, placing on CPU")
                return False

        # check data type
        tensors = [t for t in op.get_ifm_ifm2_weights_ofm() if t is not None]
        if not tensors:
            tensors = op.inputs
        for t in tensors:
            if not (t.dtype.type & BaseType.Int):
                return False
            if t.element_size() > 2 and op.type not in set(("Requantize",)) | self.binary_elem_wise_add_mul_sub:
                return False
            # check size
            if any(dim > 65536 for dim in t.shape):
                return False

        # check fused activations
        if (
            "fused_activation_function" in op.attrs
            and op.attrs["fused_activation_function"] is not None
            and op.attrs["fused_activation_function"] not in self.supported_fused_activations
        ):
            return False
        return True

    def check_convolution_restrictions(self, op):
        # check stride
        if op.attrs["stride_w"] > 3 or op.attrs["stride_h"] > 3:
            return False

        # check dilation
        dilation_w_factor = op.attrs.get("dilation_w_factor", 1)
        dilation_h_factor = op.attrs.get("dilation_h_factor", 1)
        if dilation_w_factor > 2 or dilation_h_factor > 2:
            return False

        # check data type
        ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()
        if weight_tensor.element_size() > 1:
            return False

        # check kernel size
        dilated_weight_w = weight_tensor.shape[0] + (weight_tensor.shape[0] - 1) * (dilation_w_factor - 1)
        dilated_weight_h = weight_tensor.shape[1] + (weight_tensor.shape[1] - 1) * (dilation_h_factor - 1)
        if (
            dilated_weight_w > 64
            or dilated_weight_h > 64
            or dilated_weight_w * dilated_weight_h * weight_tensor.shape[2] > 127 * 65536
        ):
            return False

        # check batch size
        if ifm_tensor.shape[0] != 1:
            return False
        return True

    def check_depthwise_convolution_restrictions(self, op):
        # check depth
        ifm_tensor, _, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        if op.attrs["depth_multiplier"] > 1 and not (
            (ifm_tensor.shape[3] == 1) and (ofm_tensor.shape[3] == op.attrs["depth_multiplier"])
        ):
            return False
        return self.check_convolution_restrictions(op)

    def check_pooling_restrictions(self, op):
        # check stride
        if op.attrs["stride_w"] > 3 or op.attrs["stride_h"] > 3:
            return False

        # check data type
        ifm_tensor, _, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        if ifm_tensor.dtype != ofm_tensor.dtype:
            return False

        # check batch size
        if ifm_tensor.shape[0] != 1:
            return False

        if op.type in self.avg_pooling_ops:
            # check kernel size
            if op.attrs["padding"] == b"SAME" and (op.attrs["filter_width"] > 8 or op.attrs["filter_height"] > 8):
                return False
            if op.attrs["padding"] == b"VALID" and (
                op.attrs["filter_width"] * op.attrs["filter_height"] > 256 * 256 or op.attrs["filter_height"] > 256
            ):
                return False

        if op.type in self.max_pooling_ops:
            # check kernel size (any padding)
            if op.attrs["filter_width"] * op.attrs["filter_height"] > 256 * 256 or op.attrs["filter_height"] > 256:
                return False
        return True

    def check_vector_product_restrictions(self, op):
        # check data type
        ifm_tensor, _, weight_tensor, _ = op.get_ifm_ifm2_weights_ofm()
        if weight_tensor.element_size() > 1:
            return False

        return True

    def check_element_wise_restrictions(self, op):
        # check data type
        ifm_tensor, ifm2_tensor, _, ofm_tensor = op.get_ifm_ifm2_weights_ofm()
        # input and output datatype must match for these operators
        if (
            op.type in self.binary_elem_wise_min_max_ops | self.unary_elem_wise_main_ops
            and ifm_tensor.dtype != ofm_tensor.dtype
        ):
            return False
        if op.type in self.binary_elem_wise_add_mul_sub:
            # both inputs must have same type
            if ifm_tensor.dtype != ifm2_tensor.dtype:
                return False
            # signed input check
            if ifm_tensor.dtype.type & BaseType.Signed:
                # output must be signed
                if ofm_tensor.dtype.type & BaseType.Unsigned:
                    return False
                # and 8, 16 or 32-bit
                if ofm_tensor.element_size() not in (1, 2, 4):
                    return False
            # unsigned input check, output must be same type or int32
            if ifm_tensor.dtype.type & BaseType.Unsigned and not (
                ifm_tensor.dtype == ofm_tensor.dtype or ofm_tensor.dtype == DataType.int32
            ):
                return False

        # check batch size
        if len(ifm_tensor.shape) > 2 and ifm_tensor.shape[0] != 1:
            return False
        if op.type in self.binary_elem_wise_main_ops:  # if op type is unary, ifm2_tensor is None
            if len(ifm2_tensor.shape) > 2 and ifm2_tensor.shape[0] != 1:
                return False
        return True

    def check_memory_only_restrictions(self, op):
        if op.type == "StridedSlice":
            # check stride size
            if len(op.inputs) > 3 and any(stride != 1 for stride in op.inputs[3].values):
                return False
            # check ellipsis_mask
            if op.attrs["ellipsis_mask"] != 0:
                return False
            # check if both new_axis_mask and shrink_axis_mask have a bit set
            if op.attrs["new_axis_mask"] != 0 and op.attrs["shrink_axis_mask"] != 0:
                return False
        return True
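

# Illustrative usage sketch (an assumption, not part of the original module):
# a placement pass could use SupportedOperators to decide which operations run
# on the NPU and which fall back to the CPU. The operation objects are assumed
# to expose `type`, `inputs`, `outputs`, `attrs` and `get_ifm_ifm2_weights_ofm()`
# as used by the checks above; `graph_ops` is a hypothetical list of such
# operations taken from the parsed network.
#
#     supported_operators = SupportedOperators()
#     npu_ops = [op for op in graph_ops if supported_operators.is_operator_supported(op)]
#     cpu_ops = [op for op in graph_ops if not supported_operators.is_operator_supported(op)]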