# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for support_operators
import numpy as np
import pytest

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import ActivationFunction
from ethosu.vela.operation import Op
from ethosu.vela.operation import Padding
from ethosu.vela.supported_operators import SupportedOperators
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil

support = SupportedOperators()


def test_constraint_tens_no_dynamic():
    # Tensors cannot be dynamic (no shape, not a scalar)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [])
    assert not support.is_operator_supported(op)


def test_constraint_tens_defined_shape():
    # Tensors cannot have None in them
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, None, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)


def test_constraint_tens_output_scalar():
    # Scalar output is not allowed at all:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [])
    op.ofm.values = 0.5
    assert not support.is_operator_supported(op)


def test_constraint_tens_input_scalar():
    # Shapeless input is allowed if it's of a certain type:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Invalid shapeless input due to op type:
    op = testutil.create_op_with_quant_tensors(Op.Relu, [], [1, 8, 8, 8])
    op.ifm.values = 0.5
    assert not support.is_operator_supported(op)


def test_constraint_tens_shape_size():
    # Tensors cannot be > 4D
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 1, 8, 8, 8], [1, 1, 8, 8, 8], set_ifm_ofm_shapes=False)
    assert not support.is_operator_supported(op)


def test_constraint_tens_dtype():
    # Tensors can only be of type uint8, int8, int16 and int32
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_int32_ops():
    # For int32, only select op types are allowed:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_dimension():
    # Tensor dimensions can only have values in the inclusive range of 1-65535
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_none_check():
    # Tensors must have quantization parameters
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm2_quant=None)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_scale():
    # Quantization scale cannot be infinite
    qp = QuantizationParameters()
    qp.zero_point = 0
    qp.scale_f32 = np.inf
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_not_supp():
    # Quantization scale cannot be array-valued for elemwise ops
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_is_supp():
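    # Array-valued (per-axis) quantization on the bias tensor is accepted for convolution ops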
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op.bias.quantization = qp
    assert support.is_operator_supported(op)


def test_constraint_fc_output_2d_not_supp():
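    # FullyConnected output shapes that are not 2D (here 4D, 3D and 1D) are not supported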
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [12, 1], [3, 2, 2, 1], weights_shape=[12, 1, 1, 1])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [12, 1, 1, 1], [1, 3, 4], weights_shape=[12, 1, 1, 1])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1, 1, 1], [1], weights_shape=[1, 1, 1, 1])
    assert not support.is_operator_supported(op)


def test_constraint_fc_output_2d_is_supp():
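    # FullyConnected with a 2D output shape is supported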
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [4, 8, 8, 4], [32, 32], weights_shape=[4, 8, 8, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1024], [16, 64], weights_shape=[1, 1024])
    assert support.is_operator_supported(op)


def test_constraint_faf():
    # Fused activation functions, if set, must be a valid op type
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = ActivationFunction(Op.Conv2D)
    assert not support.is_operator_supported(op)


def test_constraint_conv_pass():
    # First test a simple conv passes
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)


def test_constraint_stride_type():
    # Stride width and height must be integer types
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1.5, "stride_h": "1"}
    assert not support.is_operator_supported(op)


def test_constraint_stride_range():
    # Stride width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 0, "stride_h": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_type():
    # Dilation width and height must be integer types
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 1.5, "dilation_h_factor": "1"}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_range():
    # Dilation width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 0, "dilation_h_factor": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_height_range():
    # Dilated kernel height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_product_range():
    # Dilated kernel width x height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_type():
    # Weight tensor must be 8-bit
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_const():
    # Weight tensors must be constant
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(weights)
    assert not support.is_operator_supported(op)


def test_constraint_weights_limit():
    # Sum of weights has a limit
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)


def test_constraint_bias_type():
    # Bias must have a certain datatype
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_bias_40bit():
    # Bias values must not exceed 40 bits
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    bias.quant_values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_batch_size():
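    # Convolution with an IFM batch size other than 1 is not supported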
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_quant_scale_inf():
    # The IFM scale divided by the OFM scale must not be infinite
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization.scale_f32 = np.float32(1e9)
    op.ofm.quantization.scale_f32 = np.float32(1e-35)
    assert not support.is_operator_supported(op)


def test_constraint_ofm_scale_too_small():
    # Tests handling of OFM scale < 1e-38
    shp = [1, 10, 20, 16]
    op = testutil.create_elemwise_op(Op.Mul, "mul", shp, shp, shp, ofm_quant=testutil.default_quant_params(),)
    assert support.is_operator_supported(op)
    op.ofm.quantization.scale_f32 = 1e-43
    assert not support.is_operator_supported(op)


def test_constraint_depth_multiplier():
    # Valid. Depth multiplier is 1 so no further constraints
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # Invalid. Depth multiplier doesn't equal ofm channel
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Valid. Depth multiplier is equal to ofm channel
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)


def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_same():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_valid():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_matching_in_out_types():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 2, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Invalid. datatypes for ifm and ofm must match (default uint8)
    op.ifm.dtype = DataType.int8
    assert not support.is_operator_supported(op)


def test_constraint_filter_type():
    # Filter width/height must be integers
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2.5, "filter_height": "2", "padding": Padding.SAME}
    assert not support.is_operator_supported(op)


def test_constraint_filter_range():
    # Avg pool restrictions are dependent on padding:
    # SAME padding restricts both W and H to max 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": Padding.SAME}
    assert not support.is_operator_supported(op)
    # VALID padding limits are much larger
    op.attrs["padding"] = Padding.VALID
    assert support.is_operator_supported(op)


def test_constraint_filter_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter height to 256
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts filter height to 256
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_resize():
    # IFM W and H == 1
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM == OFM
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 == OFM ; align_corners = False
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 -1 == OFM ; align_corners = True
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
    op.attrs["align_corners"] = True
    assert support.is_operator_supported(op)
    # Invalid cases
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 20, 20, 8])
    assert not support.is_operator_supported(op)
    op.attrs["align_corners"] = True
    assert not support.is_operator_supported(op)


def test_constraint_matching_shapes():
    # Softmax requires the ifm and ofm shapes to match
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 2, 2, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    assert support.is_operator_supported(op)


def test_constraint_beta_value_range():
    # beta must not be negative
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    op.attrs["beta"] = -1.0
    assert not support.is_operator_supported(op)
    op.attrs["beta"] = 0.0
    assert support.is_operator_supported(op)


def test_constraint_splitv_inferred():
    # SplitV requires a maximum of one inferred shape (-1)
    qp = testutil.default_quant_params()
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], np.int16, quantization=qp)
    op.add_input_tensor(sizes)
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], np.int16, quantization=qp)
    op.add_input_tensor(sizes)
    assert support.is_operator_supported(op)


def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)


def test_constraint_axis_exists():
    # Missing axis attribute
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    assert not support.is_operator_supported(op)


def test_constraint_axis_valid():
    # Invalid axis attribute
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 7
    assert not support.is_operator_supported(op)


def test_constraint_matching_dimensionality():
    # Mismatching dimensionality: 4D+2D=4D
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)


def test_constraint_valid_dimensions():
    # Mismatching dimension value:
    # ifm2 has w and h as 2, which is not the axis to concat and doesn't match ifm1 or ofm
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 2, 2, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)


def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
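    # Creates a StridedSlice op with unit strides and the given begin/end offsets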
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, DataType.uint8, "in")
    in0.quantization = qp
    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
    out = Tensor(out_shape, DataType.uint8, "out")
    out.quantization = qp
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)


def create_pad_op(
    in_shape,
    out_shape,
    padding,
    in_dtype=DataType.int8,
    out_dtype=DataType.int8,
    pad_dtype=DataType.int32,
    pad_setting=Padding.VALID,
    kernel_size=3,
):
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, in_dtype, "in")
    in0.quantization = qp
    pad_tensor = create_const_tensor(name="pad", shape=list(np.shape(padding)), values=padding, dtype=pad_dtype)
    out = Tensor(out_shape, out_dtype, "out")
    out.quantization = qp.clone()
    op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
    conv_out_tens = Tensor(in_shape, in_dtype, "output")
    conv_out_tens.quantization = qp.clone()
    weight_tens = Tensor([kernel_size, kernel_size, in_shape[-1], out_shape[-1]], in_dtype, "weights")
    weight_tens.values = np.zeros(weight_tens.shape)
    weight_tens.quant_values = np.zeros(weight_tens.shape, np.int8)
    weight_tens.quantization = qp.clone()
    bias_tens = Tensor(out_shape, pad_dtype, "biases")
    attrs = {"padding": pad_setting, "stride_w": 2, "stride_h": 2, "dilation_w_factor": 1, "dilation_h_factor": 1}
    attrs["strides"] = (1, attrs["stride_h"], attrs["stride_w"], 1)
    conv2d_op = testutil.create_op(Op.Conv2DBias, [out, weight_tens, bias_tens], conv_out_tens, attrs)
    conv2d_op.add_input_tensor(out)
    return op


def test_constraint_pad_input_count():
    # A PAD op must have exactly two input tensors; adding a third makes it unsupported
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0]],)
    assert support.is_operator_supported(op)
    op.add_input_tensor(op.inputs[0].clone())
    assert not support.is_operator_supported(op)


def test_constraint_padded_dimensions():
    # Incorrect padding dimensions, can only pad width and height
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [1, 1], [0, 0]],)
    assert not support.is_operator_supported(op)


def test_constraint_pad_shape():
    # The padding tensor must have shape (4, 2)
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],)
    assert not support.is_operator_supported(op)


def test_constraint_pad_none():
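    # A PAD op with an empty padding tensor is not supported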
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[],)
    assert not support.is_operator_supported(op)


def test_constraint_pad_dtype():
    # The padding tensor dtype must be int32 or int64
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
        pad_dtype=DataType.int16,
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_consumer():
    # PAD operator must be followed by a valid consumer with Padding.VALID attribute
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0]],)
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
        pad_setting=Padding.SAME,
    )
    assert not support.is_operator_supported(op)
    op_consumer = testutil.create_op_with_quant_tensors(Op.ConcatTFLite, [1, 1, 1, 4], [1, 1, 1, 8])
    op.ofm.consumer_list = [op_consumer]
    assert not support.is_operator_supported(op)
    op_consumer = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op_consumer.attrs = {
        "stride_w": 2,
        "stride_h": 2,
        "filter_width": 2,
        "filter_height": 2,
        "padding": Padding.VALID,
    }
    op.ofm.consumer_list = [op_consumer]
    assert not support.is_operator_supported(op)


pad_invalid_size_test_data = [
    (2, 1, 1, 1),
    (1, 2, 1, 1),
    (1, 1, 2, 1),
    (1, 1, 1, 2),
]


@pytest.mark.parametrize("top, left, bottom, right", pad_invalid_size_test_data)
def test_constraint_pad_size(top, left, bottom, right):
    # Tests PAD operator with a padding that is too large to be handled by the NPU
    out_shape = [1, 11 + left + right, 11 + top + bottom, 1]
    padding = [[0, 0], [top, bottom], [left, right], [0, 0]]
    op = create_pad_op(in_shape=[1, 11, 11, 1], out_shape=out_shape, padding=padding,)
    assert not support.is_operator_supported(op)


leading_pad_test_data = [
    (2, 2, 11, True),
    (1, 2, 11, False),
    (2, 1, 11, False),
    (5, 2, 11, True),
]


@pytest.mark.parametrize("top, left, kernel_size, expected", leading_pad_test_data)
def test_constraint_leading_pad_size(top, left, kernel_size, expected):
    # Tests PAD operator with big kernel size; top and left pad must be multiple of stride
    out_shape = [1, 11 + left, 11 + top, 1]
    padding = [[0, 0], [top, 0], [left, 0], [0, 0]]
    op = create_pad_op(in_shape=[1, 11, 11, 1], out_shape=out_shape, padding=padding, kernel_size=kernel_size)
    assert support.is_operator_supported(op) == expected


def create_strided_slice():
    # Creates a valid strided slice operator with some valid inputs/outputs
    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
    op.attrs["begin_mask"] = 1
    op.attrs["end_mask"] = 9
    assert support.is_operator_supported(op)
    return op


def test_constraint_stridedslice_input_count():
    # Wrong number of input tensors
    op = create_strided_slice()
    op.add_input_tensor(op.inputs[0].clone())
    assert not support.is_operator_supported(op)


def test_constraint_stridedslice_inputs_const():
    # begin, end, stride values must not be None
    op = create_strided_slice()
    op.inputs[1].values = None
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[2].values = None
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[3].values = None
    assert not support.is_operator_supported(op)


def test_constraint_stridedslice_stride_values():
    # Unsupported strides
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(op)


def test_constraint_ellipsis_mask():
    # Unsupported ellipsis mask
    op = create_strided_slice()
    op.attrs["ellipsis_mask"] = 1
    assert not support.is_operator_supported(op)


def test_constraint_axis_masks():
    op = create_strided_slice()
    # Setting one of new_axis_mask/shrink_axis_mask to non-zero is ok
    op.attrs["new_axis_mask"] = 2
    assert support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["shrink_axis_mask"] = 3
    assert support.is_operator_supported(op)
    # But setting both to non-zero is not supported
    op.attrs["new_axis_mask"] = 2
    assert not support.is_operator_supported(op)


def test_constraint_slice_ranges():
    # Examples where end offset <= begin offset
    op = create_strided_slice()
    op.inputs[1].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[2].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["begin_mask"] = 0
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["end_mask"] = 0
    assert not support.is_operator_supported(op)


def test_constraint_matching_inputs_types():
    # input data types must match (default is uint8)
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.dtype = DataType.int8
    assert not support.is_operator_supported(op)


def test_constraint_matching_signed():
    # signed inputs require output to also be signed
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    op.ofm.dtype = DataType.uint8
    assert not support.is_operator_supported(op)


def test_constraint_unsigned_valid():
    # unsigned inputs require output to be either:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    # the same (default uint8)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int8
    assert not support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)
    # or int32
    op.ofm.dtype = DataType.int32
    assert support.is_operator_supported(op)


def test_constraint_inputs_int32():
    # both inputs must be type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_output_int32():
    # output must be type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_matching_quantization_parameters():
    qp = QuantizationParameters()
    qp.scale_f32 = np.float32(1.5)
    qp.zero_point = 128
    # valid - all matching (uses default quant params)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # invalid - ifm mismatch ofm
    op.ifm.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - both ifm and ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = qp
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # valid - all matching
    op.ofm.quantization = qp
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], None, [1, 8, 8, 8])
    assert support.is_operator_supported(op)


def test_constraint_elemwise_batch_size():
    # BINARY CASE
    # Batch can be >1 if dims is <=2D
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [2, 2], [2, 2])
    assert support.is_operator_supported(op)
    # For dims >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2], [1, 2, 2], [1, 2, 2])
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # Batch can be >1 if dims is <=2D
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # For dims >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2], None, [1, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_matching_either_shapes():
    # BINARY CASE
    # At least one ifm shape must match ofm's shape
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [4, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [1, 4], [4, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [4, 4], [2, 2])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 4, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 4, 16])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # No second input so this is treated the same as requiring ifm shape to match ofm shape
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.CLZ, "op", [4, 4], None, [2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_broadcast_shapes():
    # BINARY CASE
    # Only allow broadcast to 1 dim, for 1 rank index
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4], [1, 2, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 1, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    # Only allow broadcast to 1 dim, for 3 rank indexes
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 1, 1], [1, 4, 8, 16], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 8, 16], [1, 1, 1, 1], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    # One broadcast dim not 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 4, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 4], [1, 2, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    # OFM shape dim must match the largest ifm/ifm2 shape dim
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)


def test_constraint_alpha_valid():
    # Alpha cannot be negative
    op = testutil.create_elemwise_op(Op.LeakyRelu, "op", [2, 2], None, [2, 2])
    op.attrs["alpha"] = 0
    assert support.is_operator_supported(op)
    op.attrs["alpha"] = -1
    assert not support.is_operator_supported(op)


def test_constraint_hardswish_dtype():
    # HardSwish operator dtype should be int8 or uint8, and input dtype must match output
    # UINT8
    op = testutil.create_op_with_quant_tensors(Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # INT8
    op = testutil.create_op_with_quant_tensors(Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    assert support.is_operator_supported(op)

    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int16)
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.uint16)
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)

    in_tens = Tensor([1, 8, 8, 8], DataType.int8, "in")
    out_tens = Tensor([1, 8, 8, 8], DataType.uint8, "out")
    op = testutil.create_op(Op.HardSwish, [in_tens], out_tens)
    assert not support.is_operator_supported(op)