blob: f132eef787d5fe04183fdba79e37c13323ca3fd5 [file] [log] [blame]
Louis Verhaardfa2f92a2020-09-21 11:56:18 +02001# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
2#
3# SPDX-License-Identifier: Apache-2.0
4#
5# Licensed under the Apache License, Version 2.0 (the License); you may
6# not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an AS IS BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16#
17# Description:
18# Unit tests for support_operators
Michael McGeagh37ded342020-10-01 15:37:44 +010019import numpy as np
20
Louis Verhaardfa2f92a2020-09-21 11:56:18 +020021from ethosu.vela.data_type import DataType
Louis Verhaarde8a5a782020-11-02 18:04:27 +010022from ethosu.vela.operation import ActivationFunction
Louis Verhaardaee5d752020-09-30 09:01:52 +020023from ethosu.vela.operation import Op
Louis Verhaardfa2f92a2020-09-21 11:56:18 +020024from ethosu.vela.supported_operators import SupportedOperators
25from ethosu.vela.tensor import create_const_tensor
Michael McGeagh37ded342020-10-01 15:37:44 +010026from ethosu.vela.tensor import QuantizationParameters
Louis Verhaardfa2f92a2020-09-21 11:56:18 +020027from ethosu.vela.tensor import Tensor
28from ethosu.vela.test import testutil
29
# Shared SupportedOperators instance exercised by every test in this module.
support = SupportedOperators()
31
32
def test_constraint_tens_no_dynamic():
    # A dynamic tensor (empty shape that is not a scalar) must be rejected
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [])
    assert not support.is_operator_supported(op)
Michael McGeagh37ded342020-10-01 15:37:44 +010037
38
def test_constraint_tens_defined_shape():
    # Every dimension of a tensor shape must be defined (no None entries)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, None, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
43
44
def test_constraint_tens_output_scalar():
    # A scalar OFM is never allowed
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [])
    op.ofm.values = 0.5
    assert not support.is_operator_supported(op)
Michael McGeagh184b2502020-10-09 17:19:52 +010050
51
def test_constraint_tens_input_scalar():
    # A shapeless (scalar) IFM is accepted, but only for certain op types
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # The same shapeless input is rejected when the op type is outside that set
    op = testutil.create_op_with_quant_tensors(Op.Relu, [], [1, 8, 8, 8])
    op.ifm.values = 0.5
    assert not support.is_operator_supported(op)
60
61
def test_constraint_tens_shape_size():
    # Tensors with more than 4 dimensions are not supported
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 1, 8, 8, 8], [1, 1, 8, 8, 8])
    assert not support.is_operator_supported(op)
66
67
def test_constraint_tens_dtype():
    # Only uint8, int8, int16 and int32 tensors are supported; float32 is not
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)
Michael McGeagh184b2502020-10-09 17:19:52 +010072
73
def test_constraint_tens_int32_ops():
    # int32 tensors are only allowed for a select set of op types
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # Relu is not in that set
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)
80
81
def test_constraint_tens_dimension():
    # Dimension values must lie in the inclusive range 1-65535; 0 and 65536 are out
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)
86
87
def test_constraint_tens_quant_none_check():
    # A tensor without quantization parameters must be rejected
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm2_quant=None)
    assert not support.is_operator_supported(op)
92
93
def test_constraint_tens_quant_scale():
    # The quantization scale must not be infinite
    quant = QuantizationParameters()
    quant.zero_point = 0
    quant.scale_f32 = np.inf
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=quant)
    assert not support.is_operator_supported(op)
101
102
def test_constraint_tens_quant_per_axis_not_supp():
    # Elementwise ops do not accept array-valued (per-axis) quantization scales
    quant = QuantizationParameters()
    quant.zero_point = np.zeros((1, 3))
    quant.scale_f32 = np.ones((1, 3))
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=quant)
    assert not support.is_operator_supported(op)
110
111
def test_constraint_tens_quant_per_axis_is_supp():
    # Convolution bias tensors may carry per-axis quantization
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
    # Still supported once the bias gets array-valued scales/zero points
    quant = QuantizationParameters()
    quant.zero_point = np.zeros((1, 3))
    quant.scale_f32 = np.ones((1, 3))
    op.bias.quantization = quant
    assert support.is_operator_supported(op)
123
124
def test_constraint_fc_output_2d_not_supp():
    # FullyConnected output shapes that are not 2D-compatible are rejected
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [12, 1], [3, 2, 2, 1], weights_shape=[12, 1, 1, 1])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [12, 1, 1, 1], [1, 3, 4], weights_shape=[12, 1, 1, 1])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1, 1, 1], [1], weights_shape=[1, 1, 1, 1])
    assert not support.is_operator_supported(op)
132
133
def test_constraint_fc_output_2d_is_supp():
    # FullyConnected with a valid 2D output shape is accepted
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [4, 8, 8, 4], [32, 32], weights_shape=[4, 8, 8, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1024], [16, 64], weights_shape=[1, 1024])
    assert support.is_operator_supported(op)
139
140
def test_constraint_faf():
    # A fused activation function, when present, must be one of the valid op types
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = ActivationFunction(Op.Conv2D)  # Conv2D is not an activation
    assert not support.is_operator_supported(op)
Michael McGeagh1f951fc2020-10-14 09:30:02 +0100146
147
def test_constraint_conv_pass():
    # Sanity check: a minimal valid convolution is accepted
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
153
154
def test_constraint_stride_type():
    # Stride width/height attributes must be integers (float and str are invalid)
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1.5, "stride_h": "1"}
    assert not support.is_operator_supported(op)
160
161
def test_constraint_stride_range():
    # Stride width/height must lie within the supported range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 0, "stride_h": 20}
    assert not support.is_operator_supported(op)
167
168
def test_constraint_dilation_type():
    # Dilation factors must be integers (float and str are invalid)
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 1.5, "dilation_h_factor": "1"}
    assert not support.is_operator_supported(op)
174
175
def test_constraint_dilation_range():
    # Dilation factors must lie within the supported range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 0, "dilation_h_factor": 20}
    assert not support.is_operator_supported(op)
181
182
def test_constraint_dilated_height_range():
    # The dilated kernel height must lie within the supported range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)
188
189
def test_constraint_dilated_product_range():
    # The product of dilated kernel width and height must lie within range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)
195
196
def test_constraint_weights_type():
    # Weight tensors must be 8-bit; int16 weights are rejected
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)
204
205
def test_constraint_weights_const():
    # Weight tensors must be constant; a plain (non-const) tensor is rejected
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    non_const_weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    non_const_weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(non_const_weights)
    assert not support.is_operator_supported(op)
214
215
def test_constraint_weights_limit():
    # The summed weight value (via zero point) has an upper limit
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    # One past the limit of 127 * 65536
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)
222
223
def test_constraint_bias_type():
    # The bias tensor datatype is restricted; uint8 is not an accepted bias type
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bad_bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bad_bias)
    assert not support.is_operator_supported(op)
231
232
def test_constraint_bias_40bit():
    # Bias values must fit in 40 bits; this value needs 41
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    big_bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    big_bias.quant_values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(big_bias)
    assert not support.is_operator_supported(op)
241
242
def test_constraint_batch_size():
    # Convolution IFM batch size must be 1; batch 2 is rejected
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)
Michael McGeagh65fd9982020-10-20 11:49:28 +0100247
248
def test_constraint_quant_scale_inf():
    # A scale so small that downstream scaling would overflow/become inf is rejected
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.ofm.quantization.scale_f32 = np.float32(1e-39)
    assert not support.is_operator_supported(op)
253
254
def test_constraint_depth_multiplier():
    # A depth multiplier of 1 imposes no further constraints
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # A depth multiplier > 1 must equal the OFM channel count; here it does not
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Depth multiplier equal to the OFM channel count is valid
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)
274
275
def test_constraint_tconv_stride():
    # Transpose convolution requires stride 2 in both dimensions
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)
284
285
def test_constraint_tconv_same():
    # SAME padding: OFM dims must be exactly IFM dims * stride — valid case
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid case: 4x4 OFM from a 1x1 IFM with stride 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)
301
302
def test_constraint_tconv_valid():
    # VALID padding: kernel size must line up with OFM/IFM dims — valid case
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid case: 2x2 kernel cannot produce this OFM
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)
318
319
def test_constraint_matching_in_out_types():
    # IFM and OFM datatypes must match; both default to uint8 here
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 2, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # Switching only the IFM to int8 breaks the constraint
    op.ifm.dtype = DataType.int8
    assert not support.is_operator_supported(op)
328
329
def test_constraint_filter_type():
    # Filter width/height attributes must be integers (float and str are invalid)
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2.5, "filter_height": "2", "padding": b"SAME"}
    assert not support.is_operator_supported(op)
335
336
def test_constraint_filter_range():
    # AvgPool filter limits depend on padding:
    # SAME padding caps both filter W and H at 8, so 20x20 fails
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": b"SAME"}
    assert not support.is_operator_supported(op)
    # VALID padding allows much larger filters, so the same op passes
    op.attrs["padding"] = b"VALID"
    assert support.is_operator_supported(op)
346
347
def test_constraint_filter_height_range_valid_pad():
    # With VALID padding, AvgPool filter height is capped at 256
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": b"VALID"}
    assert support.is_operator_supported(op)
    # 257 is one past the limit
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
356
357
def test_constraint_filter_product_height_range_valid_pad():
    # With VALID padding, AvgPool filter W x H is capped at 256x256
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": b"VALID"}
    assert support.is_operator_supported(op)
    # Widening one side past 256 exceeds the product limit
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
366
367
def test_constraint_filter_height_range():
    # MaxPool filter height is capped at 256 regardless of padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # 257 fails with SAME padding...
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # ...and equally with VALID padding
    op.attrs["padding"] = b"VALID"
    assert not support.is_operator_supported(op)
379
380
def test_constraint_filter_product_height_range():
    # MaxPool filter W x H is capped at 256x256 regardless of padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # Exceeding the product limit fails with SAME padding...
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # ...and equally with VALID padding
    op.attrs["padding"] = b"VALID"
    assert not support.is_operator_supported(op)
392
393
def test_constraint_resize():
    # ResizeBilinear is supported when IFM W and H are both 1
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # ...or when IFM equals OFM
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # ...or when OFM is exactly 2x IFM with align_corners False
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # ...or when OFM is 2x IFM - 1 with align_corners True
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
    op.attrs["align_corners"] = True
    assert support.is_operator_supported(op)
    # Any other scaling relationship is rejected, for either align_corners value
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 20, 20, 8])
    assert not support.is_operator_supported(op)
    op.attrs["align_corners"] = True
    assert not support.is_operator_supported(op)
413
414
def test_constraint_matching_shapes():
    # Softmax requires IFM and OFM shapes to be identical
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 2, 2, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    assert support.is_operator_supported(op)
421
422
def test_constraint_beta_value_range():
    # Softmax beta must not be negative; zero is accepted
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    op.attrs["beta"] = -1.0
    assert not support.is_operator_supported(op)
    op.attrs["beta"] = 0.0
    assert support.is_operator_supported(op)
430
431
def test_constraint_splitv_inferred():
    # SplitV allows at most one inferred size entry (-1)
    quant = testutil.default_quant_params()
    # Two -1 entries: rejected
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], np.int16, quantization=quant)
    op.add_input_tensor(sizes)
    assert not support.is_operator_supported(op)
    # A single -1 entry: accepted
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], np.int16, quantization=quant)
    op.add_input_tensor(sizes)
    assert support.is_operator_supported(op)
443
444
def test_constraint_concat_pass():
    # Sanity check: a well-formed concat along the channel axis is accepted
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    second_ifm = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    second_ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(second_ifm)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)
453
454
def test_constraint_axis_exists():
    # Concat without an axis attribute must be rejected
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    second_ifm = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    second_ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(second_ifm)
    assert not support.is_operator_supported(op)
462
463
def test_constraint_axis_valid():
    # Concat axis must be valid for the tensor rank; 7 is out of range for 4D
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    second_ifm = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    second_ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(second_ifm)
    op.attrs["axis"] = 7
    assert not support.is_operator_supported(op)
472
473
def test_constraint_matching_dimensionality():
    # All concat inputs must share dimensionality; mixing 4D and 2D fails
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    second_ifm = Tensor([1, 4], DataType.uint8, "in2")
    second_ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(second_ifm)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)
482
483
def test_constraint_valid_dimensions():
    # Non-concat dimensions must match across inputs and output;
    # here ifm2's W and H are 2, which matches neither ifm1 nor the OFM
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    second_ifm = Tensor([1, 2, 2, 4], DataType.uint8, "in2")
    second_ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(second_ifm)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)
493
494
def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    # Helper: build a StridedSlice op with unit strides and all masks cleared
    quant = testutil.default_quant_params()
    ifm = Tensor(in_shape, DataType.uint8, "in")
    ifm.quantization = quant
    begin = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=quant)
    end = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=quant)
    strides = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=quant)
    ofm = Tensor(out_shape, DataType.uint8, "out")
    ofm.quantization = quant
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [ifm, begin, end, strides], ofm, attrs=attrs)
506
507
def create_strided_slice():
    # Helper: a known-good StridedSlice op, asserted supported before returning
    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
    op.attrs["begin_mask"] = 1
    op.attrs["end_mask"] = 9
    assert support.is_operator_supported(op)
    return op
515
516
def test_constraint_stridedslice_input_count():
    # StridedSlice must have exactly 4 inputs; a 5th makes it unsupported
    op = create_strided_slice()
    op.add_input_tensor(op.inputs[0].clone())
    assert not support.is_operator_supported(op)
522
523
def test_constraint_stridedslice_inputs_const():
    # begin (input 1), end (input 2) and strides (input 3) must all have values
    for input_index in (1, 2, 3):
        op = create_strided_slice()
        op.inputs[input_index].values = None
        assert not support.is_operator_supported(op)
535
536
def test_constraint_stridedslice_stride_values():
    # Only unit strides are supported; a stride of 2 in any axis fails
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(op)
542
543
def test_constraint_ellipsis_mask():
    # A non-zero ellipsis_mask is not supported
    op = create_strided_slice()
    op.attrs["ellipsis_mask"] = 1
    assert not support.is_operator_supported(op)
549
550
def test_constraint_axis_masks():
    # new_axis_mask alone may be non-zero
    op = create_strided_slice()
    op.attrs["new_axis_mask"] = 2
    assert support.is_operator_supported(op)
    # shrink_axis_mask alone may be non-zero
    op = create_strided_slice()
    op.attrs["shrink_axis_mask"] = 3
    assert support.is_operator_supported(op)
    # Both non-zero at once is not supported
    op.attrs["new_axis_mask"] = 2
    assert not support.is_operator_supported(op)
562
563
def test_constraint_slice_ranges():
    # Slices where the end offset does not exceed the begin offset are rejected
    op = create_strided_slice()
    op.inputs[1].values = [0, 7, 2, 0]  # begin pushed past end
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[2].values = [0, 7, 2, 0]  # end pulled back before begin
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["begin_mask"] = 0  # masked-out begin no longer saves the range
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["end_mask"] = 0  # masked-out end no longer saves the range
    assert not support.is_operator_supported(op)
578
579
def test_constraint_matching_inputs_types():
    # Both elementwise inputs must share a datatype (default is uint8)
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.dtype = DataType.int8
    assert not support.is_operator_supported(op)
585
586
def test_constraint_matching_signed():
    # Signed inputs require a signed output; int8 in with uint8 out fails
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    op.ofm.dtype = DataType.uint8
    assert not support.is_operator_supported(op)
592
593
def test_constraint_unsigned_valid():
    # Unsigned inputs allow only two output types:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    # 1) the same unsigned type (default uint8)
    assert support.is_operator_supported(op)
    # signed 8/16-bit outputs are rejected
    op.ofm.dtype = DataType.int8
    assert not support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)
    # 2) int32
    op.ofm.dtype = DataType.int32
    assert support.is_operator_supported(op)
606
607
def test_constraint_inputs_int32():
    # SHL requires both inputs to be int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)  # default uint8 fails
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # Downgrading just one input breaks the constraint
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)
616
617
def test_constraint_output_int32():
    # SHL requires an int32 output
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)
624
625
def test_constraint_matching_quantization_parameters():
    # Minimum requires IFM/IFM2 quantization to match the OFM's
    mismatched = QuantizationParameters()
    mismatched.scale_f32 = np.float32(1.5)
    mismatched.zero_point = 128
    # All tensors share the default quant params: valid
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Only ifm differs from ofm: invalid
    op.ifm.quantization = mismatched
    assert not support.is_operator_supported(op)
    # Only ifm2 differs from ofm: invalid
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = mismatched
    assert not support.is_operator_supported(op)
    # Both inputs differ from ofm: invalid
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = mismatched
    op.ifm2.quantization = mismatched
    assert not support.is_operator_supported(op)
    # Bringing the ofm in line makes everything match again: valid
    op.ofm.quantization = mismatched
    assert support.is_operator_supported(op)
648
649
def test_constraint_elemwise_batch_size():
    # Binary elementwise: batch > 1 allowed only for shapes of 2 dims or fewer
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [2, 2], [2, 2])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2], [1, 2, 2], [1, 2, 2])
    assert support.is_operator_supported(op)
    # 3D with batch 2 is invalid
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert not support.is_operator_supported(op)

    # Unary elementwise: the same batch rules apply
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2], None, [1, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # 3D with batch 2 is invalid
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)
672
673
def test_constraint_matching_either_shapes():
    # Binary elementwise: at least one IFM shape must equal the OFM shape
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [4, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [1, 4], [4, 4])
    assert support.is_operator_supported(op)
    # Neither input matches the OFM: invalid
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [4, 4], [2, 2])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 4, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 4, 16])
    assert not support.is_operator_supported(op)

    # Unary elementwise: with no second input, the IFM shape must equal the OFM shape
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.CLZ, "op", [4, 4], None, [2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)
694
695
def test_constraint_broadcast_shapes():
    # Binary elementwise broadcasting: a broadcast dim must be 1 in one input
    # Broadcasting a single rank index is fine, in either direction
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4], [1, 2, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 1, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    # Broadcasting three rank indexes at once is also fine
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 1, 1], [1, 4, 8, 16], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 8, 16], [1, 1, 1, 1], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    # A mismatching dim that is not 1 cannot be broadcast
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 4, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 4], [1, 2, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    # Each OFM dim must be the largest of the corresponding ifm/ifm2 dims
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    # NOTE(review): duplicate of the previous case, kept verbatim from the original
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
722
723
def test_constraint_alpha_valid():
    # LeakyRelu alpha must not be negative; zero is accepted
    op = testutil.create_elemwise_op(Op.LeakyRelu, "op", [2, 2], None, [2, 2])
    op.attrs["alpha"] = 0
    assert support.is_operator_supported(op)
    op.attrs["alpha"] = -1
    assert not support.is_operator_supported(op)