# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for supported_operators
import numpy as np

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import ActivationFunction
from ethosu.vela.operation import Op
from ethosu.vela.supported_operators import SupportedOperators
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil

support = SupportedOperators()
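# Each test below follows the same pattern: build a minimal operator with the
# testutil helpers, then ask this shared SupportedOperators instance whether
# the operator can run on the NPU. A representative sketch:
#
#     op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
#     assert support.is_operator_supported(op)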


def test_constraint_tens_no_dynamic():
    # Tensors cannot be dynamic (no shape, not a scalar)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [])
    assert not support.is_operator_supported(op)


def test_constraint_tens_defined_shape():
    # Tensor shapes must not contain None
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, None, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)


def test_constraint_tens_output_scalar():
    # Scalar output is not allowed at all:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [])
    op.ofm.values = 0.5
    assert not support.is_operator_supported(op)


def test_constraint_tens_input_scalar():
    # Shapeless input is allowed if it is of a certain type:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Invalid shapeless input due to op type:
    op = testutil.create_op_with_quant_tensors(Op.Relu, [], [1, 8, 8, 8])
    op.ifm.values = 0.5
    assert not support.is_operator_supported(op)


def test_constraint_tens_shape_size():
    # Tensors cannot be > 4D
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 1, 8, 8, 8], [1, 1, 8, 8, 8])
    assert not support.is_operator_supported(op)


def test_constraint_tens_dtype():
    # Tensors can only be of type uint8, int8, int16 or int32
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_int32_ops():
    # For int32, only select op types are allowed:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_dimension():
    # Tensor dimensions can only be in the inclusive range 1-65535
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_none_check():
    # Tensors must have quantization parameters
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm2_quant=None)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_scale():
    # Quantization scale cannot be infinite
    qp = QuantizationParameters()
    qp.zero_point = 0
    qp.scale_f32 = np.inf
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_not_supp():
    # Quantization scale cannot be array-valued for elemwise ops
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_is_supp():
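    # Unlike the elementwise op above, per-axis quantization (array-valued
    # zero points and scales) is accepted here, applied to the conv's bias tensor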
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op.bias.quantization = qp
    assert support.is_operator_supported(op)


def test_constraint_faf():
    # Fused activation functions, if set, must be a valid op type
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = ActivationFunction(Op.Conv2D)
    assert not support.is_operator_supported(op)


def test_constraint_conv_pass():
    # First test that a simple conv passes
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)


def test_constraint_stride_type():
    # Stride width and height must be integer types
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1.5, "stride_h": "1"}
    assert not support.is_operator_supported(op)


def test_constraint_stride_range():
    # Stride width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 0, "stride_h": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_type():
    # Dilation width and height must be integer types
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 1.5, "dilation_h_factor": "1"}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_range():
    # Dilation width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 0, "dilation_h_factor": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_height_range():
    # Dilated kernel height must lie within a certain range
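    # (64 is the presumed maximum; the weight tensor below has a kernel height of 65)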
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_product_range():
    # Dilated kernel width x height must lie within a certain range
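    # (the presumed maximum product is 64 * 64 = 4096; 64 * 65 = 4160 just exceeds it)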
176 op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
177 op.attrs = {"stride_w": 1, "stride_h": 1}
178 assert not support.is_operator_supported(op)
179
180
181def test_constraint_weights_type():
182 # Weight tensor must be 8-bit
183 op = testutil.create_op_with_quant_tensors(
184 Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
185 )
186 op.attrs = {"stride_w": 1, "stride_h": 1}
187 assert not support.is_operator_supported(op)
188
189
Michael McGeagh65fd9982020-10-20 11:49:28 +0100190def test_constraint_weights_const():
Michael McGeagh1f951fc2020-10-14 09:30:02 +0100191 # Weight tensor cannot be non-const tensors
192 op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
193 op.attrs = {"stride_w": 1, "stride_h": 1}
194 weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
Michael McGeagh65fd9982020-10-20 11:49:28 +0100195 weights.quantization = testutil.default_quant_params()
Michael McGeagh1f951fc2020-10-14 09:30:02 +0100196 op.add_input_tensor(weights)
197 assert not support.is_operator_supported(op)
198
199
200def test_constraint_weights_limit():
201 # Sum of weights has a limit
202 op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
203 op.attrs = {"stride_w": 1, "stride_h": 1}
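    # The zero point below is chosen so that the zero-point-adjusted weight sum
    # exceeds what appears to be a 127 * 65536 limit by exactly one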
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)


def test_constraint_bias_type():
    # Bias must have a certain datatype
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_bias_40bit():
    # Bias values must not exceed 40 bits
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
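    # 0x01FF_FFFF_FFFF is a 41-bit value, one bit too wide for the 40-bit limit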
    bias.quant_values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_batch_size():
    # Batch size must be 1
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_quant_scale_inf():
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
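    # 1e-39 is subnormal in float32, so the scale ratio computed from it
    # presumably overflows to infinity, which is what this constraint rejects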
    op.ofm.quantization.scale_f32 = np.float32(1e-39)
    assert not support.is_operator_supported(op)


def test_constraint_depth_multiplier():
    # Valid. Depth multiplier is 1 so no further constraints
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # Invalid. Depth multiplier doesn't equal ofm channel
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Valid. Depth multiplier is equal to ofm channel
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)


def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_same():
    # Valid: with SAME padding the ofm must be exactly ifm * stride (1 * 2 = 2)
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid: a 4x4 ofm from a 1x1 ifm does not match the stride
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_valid():
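    # With VALID padding the transpose conv output size is presumably
    # (ifm - 1) * stride + kernel per dimension, so a 1x1 ifm with stride 2
    # needs a 4x4 kernel to produce a 4x4 ofm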
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid: a 2x2 kernel cannot produce the 4x4 ofm
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_matching_in_out_types():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 2, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # Invalid. Datatypes for ifm and ofm must match (the default is uint8)
    op.ifm.dtype = DataType.int8
    assert not support.is_operator_supported(op)


def test_constraint_filter_type():
    # Filter width/height must be integers
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2.5, "filter_height": "2", "padding": b"SAME"}
    assert not support.is_operator_supported(op)


def test_constraint_filter_range():
    # Avg pool restrictions are dependent on padding:
    # SAME padding restricts both W and H to a max of 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": b"SAME"}
    assert not support.is_operator_supported(op)
    # VALID padding limits are much larger
    op.attrs["padding"] = b"VALID"
    assert support.is_operator_supported(op)


def test_constraint_filter_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": b"VALID"}
    assert support.is_operator_supported(op)
    # VALID padding restricts the filter height to a max of 256
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": b"VALID"}
    assert support.is_operator_supported(op)
    # VALID padding restricts the filter W x H product to 256 x 256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # Filter height is restricted to a max of 256
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # It doesn't matter if the padding is SAME or VALID
    op.attrs["padding"] = b"VALID"
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # The filter W x H product is restricted to 256 x 256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # It doesn't matter if the padding is SAME or VALID
    op.attrs["padding"] = b"VALID"
    assert not support.is_operator_supported(op)


def test_constraint_resize():
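    # ResizeBilinear appears to be supported only when the IFM is 1x1, when the
    # OFM equals the IFM, or when the OFM is exactly 2x the IFM (2x - 1 with
    # align_corners), as the cases below exercise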
    # IFM W and H == 1
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM == OFM
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 == OFM ; align_corners = False
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 -1 == OFM ; align_corners = True
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
    op.attrs["align_corners"] = True
    assert support.is_operator_supported(op)
    # Invalid cases
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 20, 20, 8])
    assert not support.is_operator_supported(op)
    op.attrs["align_corners"] = True
    assert not support.is_operator_supported(op)


def test_constraint_matching_shapes():
    # Softmax requires the ifm and ofm shapes to match
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 2, 2, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    assert support.is_operator_supported(op)


def test_constraint_beta_value_range():
    # beta must not be negative
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    op.attrs["beta"] = -1.0
    assert not support.is_operator_supported(op)
    op.attrs["beta"] = 0.0
    assert support.is_operator_supported(op)


def test_constraint_splitv_inferred():
    # SplitV requires a maximum of one inferred shape (-1)
    qp = testutil.default_quant_params()
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], np.int16, quantization=qp)
    op.add_input_tensor(sizes)
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], np.int16, quantization=qp)
    op.add_input_tensor(sizes)
    assert support.is_operator_supported(op)


def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)


def test_constraint_axis_exists():
    # Missing axis attribute
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    assert not support.is_operator_supported(op)


def test_constraint_axis_valid():
    # Invalid axis attribute (out of range for 4D tensors)
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 7
    assert not support.is_operator_supported(op)


def test_constraint_matching_dimensionality():
    # Mismatching dimensionality: 4D+2D=4D
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)


def test_constraint_valid_dimensions():
    # Mismatching dimension value:
    # ifm2 has w and h as 2, which is not the axis to concat and doesn't match ifm1 or ofm
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 2, 2, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)


def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, DataType.uint8, "in")
    in0.quantization = qp
    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
    out = Tensor(out_shape, DataType.uint8, "out")
    out.quantization = qp
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)


def create_strided_slice():
    # Creates a valid strided slice operator with some valid inputs/outputs
    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
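    # begin_mask bit 0 masks out the otherwise out-of-range begin offset (127)
    # on axis 0; end_mask = 9 (0b1001) likewise masks the end offsets on axes 0 and 3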
    op.attrs["begin_mask"] = 1
    op.attrs["end_mask"] = 9
    assert support.is_operator_supported(op)
    return op


def test_constraint_stridedslice_input_count():
    # Wrong number of input tensors
    op = create_strided_slice()
    op.add_input_tensor(op.inputs[0].clone())
    assert not support.is_operator_supported(op)


def test_constraint_stridedslice_inputs_const():
    # begin, end and stride values must not be None
    op = create_strided_slice()
    op.inputs[1].values = None
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[2].values = None
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[3].values = None
    assert not support.is_operator_supported(op)


def test_constraint_stridedslice_stride_values():
    # Only strides of 1 are supported
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(op)


def test_constraint_ellipsis_mask():
    # A non-zero ellipsis mask is not supported
    op = create_strided_slice()
    op.attrs["ellipsis_mask"] = 1
    assert not support.is_operator_supported(op)


def test_constraint_axis_masks():
    op = create_strided_slice()
    # Setting one of new_axis_mask/shrink_axis_mask to non-zero is ok
    op.attrs["new_axis_mask"] = 2
    assert support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["shrink_axis_mask"] = 3
    assert support.is_operator_supported(op)
    # But setting both to non-zero is not supported
    op.attrs["new_axis_mask"] = 2
    assert not support.is_operator_supported(op)


def test_constraint_slice_ranges():
    # Examples where end offset <= begin offset
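    # create_strided_slice() relies on begin_mask/end_mask to hide its
    # out-of-range offsets, so overwriting an offset or clearing a mask below
    # exposes a range whose end does not exceed its begin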
    op = create_strided_slice()
    op.inputs[1].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[2].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["begin_mask"] = 0
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["end_mask"] = 0
    assert not support.is_operator_supported(op)


def test_constraint_matching_inputs_types():
    # Input data types must match (the default is uint8)
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.dtype = DataType.int8
    assert not support.is_operator_supported(op)


def test_constraint_matching_signed():
    # Signed inputs require the output to also be signed
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    op.ofm.dtype = DataType.uint8
    assert not support.is_operator_supported(op)


def test_constraint_unsigned_valid():
    # Unsigned inputs require the output to be either:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    # the same (default uint8)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int8
    assert not support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)
    # or int32
    op.ofm.dtype = DataType.int32
    assert support.is_operator_supported(op)


def test_constraint_inputs_int32():
    # Both inputs must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_output_int32():
    # The output must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_matching_quantization_parameters():
    qp = QuantizationParameters()
    qp.scale_f32 = np.float32(1.5)
    qp.zero_point = 128
    # Valid - all matching (uses default quant params)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Invalid - ifm mismatches ofm
    op.ifm.quantization = qp
    assert not support.is_operator_supported(op)
    # Invalid - ifm2 mismatches ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # Invalid - both ifm and ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = qp
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # Valid - all matching
    op.ofm.quantization = qp
    assert support.is_operator_supported(op)


def test_constraint_elemwise_batch_size():
    # BINARY CASE
    # Batch can be >1 if the shape is <=2D
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [2, 2], [2, 2])
    assert support.is_operator_supported(op)
    # For shapes >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2], [1, 2, 2], [1, 2, 2])
    assert support.is_operator_supported(op)
    # Invalid case
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # Batch can be >1 if the shape is <=2D
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # For shapes >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2], None, [1, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # Invalid case
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_matching_either_shapes():
    # BINARY CASE
    # At least one ifm shape must match the ofm's shape
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [4, 4], [2, 2])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [2, 2], [2, 2])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [4, 4], [2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # There is no second input, so this is treated the same as requiring the ifm shape to match the ofm shape
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.CLZ, "op", [4, 4], None, [2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_alpha_valid():
    # Alpha cannot be negative
    op = testutil.create_elemwise_op(Op.LeakyRelu, "op", [2, 2], None, [2, 2])
    op.attrs["alpha"] = 0
    assert support.is_operator_supported(op)
    op.attrs["alpha"] = -1
    assert not support.is_operator_supported(op)