# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for support_operators
Michael McGeagh37ded342020-10-01 15:37:44 +010019import numpy as np
Louis Verhaardebf4af62021-01-27 15:57:57 +010020import pytest
Michael McGeagh37ded342020-10-01 15:37:44 +010021
Louis Verhaardfa2f92a2020-09-21 11:56:18 +020022from ethosu.vela.data_type import DataType
Louis Verhaarde8a5a782020-11-02 18:04:27 +010023from ethosu.vela.operation import ActivationFunction
Louis Verhaardaee5d752020-09-30 09:01:52 +020024from ethosu.vela.operation import Op
Michael McGeagh16895482020-12-14 15:51:20 +000025from ethosu.vela.operation import Padding
Louis Verhaardfa2f92a2020-09-21 11:56:18 +020026from ethosu.vela.supported_operators import SupportedOperators
27from ethosu.vela.tensor import create_const_tensor
Michael McGeagh37ded342020-10-01 15:37:44 +010028from ethosu.vela.tensor import QuantizationParameters
Louis Verhaardfa2f92a2020-09-21 11:56:18 +020029from ethosu.vela.tensor import Tensor
30from ethosu.vela.test import testutil
31
32support = SupportedOperators()
33
34
def test_constraint_tens_no_dynamic():
    """Dynamic tensors (no shape, yet not a scalar) must be rejected."""
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [])
    assert not support.is_operator_supported(op)
Michael McGeagh37ded342020-10-01 15:37:44 +010039
40
def test_constraint_tens_defined_shape():
    """Tensor shapes containing None must be rejected."""
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, None, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
45
46
def test_constraint_tens_output_scalar():
    """A scalar OFM is never allowed."""
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [])
    op.ofm.values = 0.5
    assert not support.is_operator_supported(op)
Michael McGeagh184b2502020-10-09 17:19:52 +010052
53
def test_constraint_tens_input_scalar():
    """Scalar (shapeless) inputs are allowed only for certain op types."""
    # An elementwise op accepts a shapeless second input
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # ...but a Relu with a shapeless IFM is invalid
    op = testutil.create_op_with_quant_tensors(Op.Relu, [], [1, 8, 8, 8])
    op.ifm.values = 0.5
    assert not support.is_operator_supported(op)
62
63
def test_constraint_tens_shape_size():
    """Tensors with more than 4 dimensions must be rejected."""
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 1, 8, 8, 8], [1, 1, 8, 8, 8], set_ifm_ofm_shapes=False)
    assert not support.is_operator_supported(op)
68
69
def test_constraint_tens_dtype():
    """Only uint8, int8, int16 and int32 tensors are allowed; float32 is not."""
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)
Michael McGeagh184b2502020-10-09 17:19:52 +010074
75
def test_constraint_tens_int32_ops():
    """int32 tensors are allowed only for selected op types."""
    # Elementwise Mul accepts int32
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # Relu does not
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)
82
83
def test_constraint_tens_dimension():
    """Tensor dimensions must lie in the inclusive range 1-65535."""
    # 0 is below the minimum and 65536 above the maximum
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)
88
89
def test_constraint_tens_quant_none_check():
    """Tensors without quantization parameters must be rejected."""
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm2_quant=None)
    assert not support.is_operator_supported(op)
94
95
def test_constraint_tens_quant_scale():
    """An infinite quantization scale must be rejected."""
    quant = QuantizationParameters()
    quant.zero_point = 0
    quant.scale_f32 = np.inf
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=quant)
    assert not support.is_operator_supported(op)
103
104
def test_constraint_tens_quant_per_axis_not_supp():
    """Array-valued (per-axis) quantization scales are invalid for elementwise ops."""
    quant = QuantizationParameters()
    quant.zero_point = np.zeros((1, 3))
    quant.scale_f32 = np.ones((1, 3))
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=quant)
    assert not support.is_operator_supported(op)
112
113
def test_constraint_tens_quant_per_axis_is_supp():
    """Per-axis quantization on a convolution bias is supported."""
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
    # Switching the bias to per-axis quantization keeps the op supported
    quant = QuantizationParameters()
    quant.zero_point = np.zeros((1, 3))
    quant.scale_f32 = np.ones((1, 3))
    op.bias.quantization = quant
    assert support.is_operator_supported(op)
125
126
def test_constraint_fc_output_2d_not_supp():
    """FullyConnected outputs that cannot be mapped to 2D are rejected."""
    cases = (
        ([12, 1], [3, 2, 2, 1], [12, 1, 1, 1]),
        ([12, 1, 1, 1], [1, 3, 4], [12, 1, 1, 1]),
        ([1, 1, 1, 1], [1], [1, 1, 1, 1]),
    )
    for ifm_shape, ofm_shape, w_shape in cases:
        op = testutil.create_op_with_quant_tensors(Op.FullyConnected, ifm_shape, ofm_shape, weights_shape=w_shape)
        assert not support.is_operator_supported(op)
134
135
def test_constraint_fc_output_2d_is_supp():
    """FullyConnected outputs that do map to 2D are supported."""
    cases = (
        ([4, 8, 8, 4], [32, 32], [4, 8, 8, 4]),
        ([1, 1024], [16, 64], [1, 1024]),
    )
    for ifm_shape, ofm_shape, w_shape in cases:
        op = testutil.create_op_with_quant_tensors(Op.FullyConnected, ifm_shape, ofm_shape, weights_shape=w_shape)
        assert support.is_operator_supported(op)
141
142
def test_constraint_faf():
    """A fused activation function must be a valid activation op type."""
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    # Conv2D is not an activation function
    op.activation = ActivationFunction(Op.Conv2D)
    assert not support.is_operator_supported(op)
Michael McGeagh1f951fc2020-10-14 09:30:02 +0100148
149
def test_constraint_faf_ofm_dtype():
    """With a fused activation function, the OFM must be 8 or 16 bit."""
    shape = [1, 8, 8, 8]
    for dtype in (DataType.int8, DataType.uint8, DataType.int16, DataType.int32):
        op = testutil.create_elemwise_op(Op.Add, "op", shape, shape, shape, datatype=dtype)
        op.activation = ActivationFunction(Op.Relu)
        # Only data types up to 2 bytes are allowed
        expected = dtype.size_in_bytes() <= 2
        assert support.is_operator_supported(op) == expected, f"Data type: {dtype}"
158
159
def test_constraint_conv_pass():
    """Sanity check: a minimal convolution is supported."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
165
166
def test_constraint_stride_type():
    """Stride width and height must be integers."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    # float and string strides are both invalid
    op.attrs = {"stride_w": 1.5, "stride_h": "1"}
    assert not support.is_operator_supported(op)
172
173
def test_constraint_stride_range():
    """Stride width and height must lie within the supported range."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    # 0 is below and 20 above the allowed range
    op.attrs = {"stride_w": 0, "stride_h": 20}
    assert not support.is_operator_supported(op)
179
180
def test_constraint_dilation_type():
    """Dilation factors must be integers."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 1.5, "dilation_h_factor": "1"}
    assert not support.is_operator_supported(op)
186
187
def test_constraint_dilation_range():
    """Dilation factors must lie within the supported range."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 0, "dilation_h_factor": 20}
    assert not support.is_operator_supported(op)
193
194
def test_constraint_dilated_height_range():
    """The dilated kernel height must lie within the supported range."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)
200
201
def test_constraint_dilated_product_range():
    """The dilated kernel width x height product must lie within the supported range."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)
207
208
def test_constraint_weights_type():
    """Weight tensors must be 8-bit; int16 weights are rejected."""
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)
216
217
def test_constraint_weights_const():
    """Weight tensors must be constant; a plain (non-const) tensor is rejected."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    non_const_weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    non_const_weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(non_const_weights)
    assert not support.is_operator_supported(op)
226
227
def test_constraint_weights_limit():
    """The weight sum must stay below a fixed limit."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    # A huge zero point pushes the weight sum past the limit
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)
234
235
def test_constraint_bias_type():
    """Bias tensors must use an allowed datatype; uint8 is rejected."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bad_bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bad_bias)
    assert not support.is_operator_supported(op)
243
244
def test_constraint_bias_40bit():
    """Bias values must fit within 40 bits."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    # A 41-bit value, one past the allowed range
    bias.quant_values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)
253
254
def test_constraint_batch_size():
    """A batch size other than 1 must be rejected."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)
Michael McGeagh65fd9982020-10-20 11:49:28 +0100259
260
def test_constraint_quant_scale_inf():
    """Reject ops where IFM scale / OFM scale becomes infinite."""
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    # 1e9 / 1e-35 overflows float32
    op.ifm.quantization.scale_f32 = np.float32(1e9)
    op.ofm.quantization.scale_f32 = np.float32(1e-35)
    assert not support.is_operator_supported(op)
267
268
def test_constraint_ofm_scale_too_small():
    """OFM quantization scales below 1e-38 must be rejected."""
    shape = [1, 10, 20, 16]
    op = testutil.create_elemwise_op(Op.Mul, "mul", shape, shape, shape, ofm_quant=testutil.default_quant_params(),)
    assert support.is_operator_supported(op)
    # Below the smallest normal float32
    op.ofm.quantization.scale_f32 = 1e-43
    assert not support.is_operator_supported(op)
276
277
def test_constraint_depth_multiplier():
    """Depth multiplier must be 1 or equal to the OFM channel count."""
    cases = (
        # (ofm_shape, depth_multiplier, expected): multiplier 1 imposes no
        # further constraint; otherwise it must equal the OFM channel count.
        ([1, 1, 1, 2], 1, True),
        ([1, 1, 1, 1], 2, False),
        ([1, 1, 1, 2], 2, True),
    )
    for ofm_shape, multiplier, expected in cases:
        op = testutil.create_op_with_quant_tensors(
            Op.DepthwiseConv2DBias, [1, 1, 1, 1], ofm_shape, weights_shape=[1, 1, 1, 1]
        )
        op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": multiplier}
        assert support.is_operator_supported(op) == expected
297
298
def test_constraint_tconv_stride():
    """Transpose convolution strides must be 2; stride 1 is rejected."""
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": Padding.SAME}
    input_tens = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    input_tens.quantization = testutil.default_quant_params()
    op.add_input_tensor(input_tens)
    assert not support.is_operator_supported(op)
307
308
def test_constraint_tconv_same():
    """SAME-padded transpose conv needs OFM dims equal to 2x the IFM dims."""

    def make_op(ofm_shape):
        # Build a stride-2 SAME-padded transpose conv with a 1x1 IFM
        op = testutil.create_op_with_quant_tensors(
            Op.Conv2DBackpropInput, [0], ofm_shape, weights_shape=[1, 1, 1, 1]
        )
        op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
        input_tens = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
        input_tens.quantization = testutil.default_quant_params()
        op.add_input_tensor(input_tens)
        return op

    # OFM exactly twice the IFM: supported
    assert support.is_operator_supported(make_op([1, 2, 2, 1]))
    # OFM four times the IFM: not supported
    assert not support.is_operator_supported(make_op([1, 4, 4, 1]))
324
325
def test_constraint_tconv_valid():
    """VALID-padded transpose conv needs the kernel to match the upscale."""

    def make_op(weights_shape):
        # Build a stride-2 VALID-padded transpose conv with a 1x1 IFM and 4x4 OFM
        op = testutil.create_op_with_quant_tensors(
            Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=weights_shape
        )
        op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
        input_tens = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
        input_tens.quantization = testutil.default_quant_params()
        op.add_input_tensor(input_tens)
        return op

    # 4x4 kernel matches the 1x1 -> 4x4 upscale: supported
    assert support.is_operator_supported(make_op([4, 4, 1, 1]))
    # 2x2 kernel does not: not supported
    assert not support.is_operator_supported(make_op([2, 2, 1, 1]))
341
342
def test_constraint_matching_in_out_types():
    """IFM and OFM data types must match."""
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 2, "padding": Padding.SAME}
    # Both default to uint8: supported
    assert support.is_operator_supported(op)
    # Mismatched int8 IFM vs uint8 OFM: not supported
    op.ifm.dtype = DataType.int8
    assert not support.is_operator_supported(op)
351
352
def test_constraint_filter_type():
    """Filter width and height must be integers."""
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2.5, "filter_height": "2", "padding": Padding.SAME}
    assert not support.is_operator_supported(op)
358
359
def test_constraint_filter_range():
    """AvgPool filter-size limits depend on the padding mode."""
    # SAME padding caps both filter dimensions at 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": Padding.SAME}
    assert not support.is_operator_supported(op)
    # VALID padding permits much larger filters
    op.attrs["padding"] = Padding.VALID
    assert support.is_operator_supported(op)
369
370
def test_constraint_filter_height_range_valid_pad():
    """VALID-padded AvgPool filter height is capped at 256."""
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # One past the limit fails
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
379
380
def test_constraint_filter_product_height_range_valid_pad():
    """VALID-padded AvgPool filter area is capped at 256x256."""
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # Widening one dimension past the limit fails
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
389
390
def test_constraint_filter_height_range():
    """MaxPool filter height is capped at 256 regardless of padding mode."""
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # One past the limit fails...
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # ...and switching to VALID padding makes no difference
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)
402
403
def test_constraint_filter_product_height_range():
    """MaxPool filter area is capped at 256x256 regardless of padding mode."""
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Widening one dimension past the limit fails...
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # ...and switching to VALID padding makes no difference
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)
415
416
def test_constraint_resize():
    """ResizeBilinear is supported only for specific IFM/OFM shape relations."""
    # 1x1 IFM: supported
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Identity resize (IFM == OFM): supported
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Exact 2x upscale without align_corners: supported
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # 2x upscale minus one with align_corners: supported
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
    op.attrs["align_corners"] = True
    assert support.is_operator_supported(op)
    # Any other scale factor is rejected, with or without align_corners
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 20, 20, 8])
    assert not support.is_operator_supported(op)
    op.attrs["align_corners"] = True
    assert not support.is_operator_supported(op)
436
437
def test_constraint_matching_shapes():
    """Softmax requires identical IFM and OFM shapes."""
    # Mismatched shapes: not supported
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 2, 2, 4])
    assert not support.is_operator_supported(op)
    # Matching shapes: supported
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    assert support.is_operator_supported(op)
444
445
def test_constraint_beta_value_range():
    """Softmax beta must not be negative (zero is allowed)."""
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    op.attrs["beta"] = -1.0
    assert not support.is_operator_supported(op)
    op.attrs["beta"] = 0.0
    assert support.is_operator_supported(op)
453
454
def test_constraint_splitv_inferred():
    """SplitV allows at most one inferred size (-1) in its sizes tensor."""
    quant = testutil.default_quant_params()
    # Two inferred sizes: not supported
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], np.int16, quantization=quant)
    op.add_input_tensor(sizes)
    assert not support.is_operator_supported(op)
    # A single inferred size: supported
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], np.int16, quantization=quant)
    op.add_input_tensor(sizes)
    assert support.is_operator_supported(op)
466
467
def test_constraint_concat_pass():
    """Sanity check: a well-formed concat along the channel axis is supported."""
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    extra_ifm = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    extra_ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(extra_ifm)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)
476
477
def test_constraint_axis_exists():
    """A concat without an axis attribute must be rejected."""
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    extra_ifm = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    extra_ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(extra_ifm)
    # No op.attrs["axis"] set
    assert not support.is_operator_supported(op)
485
486
def test_constraint_axis_valid():
    """A concat axis outside the tensor rank must be rejected."""
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    extra_ifm = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    extra_ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(extra_ifm)
    # Axis 7 is out of range for 4D tensors
    op.attrs["axis"] = 7
    assert not support.is_operator_supported(op)
495
496
def test_constraint_matching_dimensionality():
    """Concat inputs must all have the same rank; 4D + 2D is rejected."""
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    extra_ifm = Tensor([1, 4], DataType.uint8, "in2")
    extra_ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(extra_ifm)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)
505
506
def test_constraint_valid_dimensions():
    """Non-concat dimensions must match between concat inputs and the OFM."""
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    # W and H of 2 differ from the other input and the OFM on non-concat axes
    extra_ifm = Tensor([1, 2, 2, 4], DataType.uint8, "in2")
    extra_ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(extra_ifm)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)
516
517
def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    """Build a StridedSlice op with unit strides and all masks cleared."""
    quant = testutil.default_quant_params()
    ifm = Tensor(in_shape, DataType.uint8, "in")
    ifm.quantization = quant
    begin = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=quant)
    end = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=quant)
    strides = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=quant)
    ofm = Tensor(out_shape, DataType.uint8, "out")
    ofm.quantization = quant
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [ifm, begin, end, strides], ofm, attrs=attrs)
529
530
def create_pad_op(
    in_shape,
    out_shape,
    padding,
    in_dtype=DataType.int8,
    out_dtype=DataType.int8,
    pad_dtype=DataType.int32,
    pad_setting=Padding.VALID,
    kernel_size=3,
):
    """Build a Pad op whose OFM is consumed by a Conv2DBias.

    Returns the Pad op; the convolution consumer is attached through the
    OFM's consumer list so tests can exercise pad-fusing constraints.
    """
    quant = testutil.default_quant_params()
    ifm = Tensor(in_shape, in_dtype, "in")
    ifm.quantization = quant
    pad_tensor = create_const_tensor(name="pad", shape=list(np.shape(padding)), values=padding, dtype=pad_dtype)
    ofm = Tensor(out_shape, out_dtype, "out")
    ofm.quantization = quant.clone()
    pad_op = testutil.create_op(Op.Pad, [ifm, pad_tensor], ofm)
    # Convolution consuming the padded tensor
    conv_ofm = Tensor(in_shape, in_dtype, "output")
    conv_ofm.quantization = quant.clone()
    weights = Tensor([kernel_size, kernel_size, in_shape[-1], out_shape[-1]], in_dtype, "weights")
    weights.values = np.zeros(weights.shape)
    weights.quant_values = np.zeros(weights.shape, np.int8)
    weights.quantization = quant.clone()
    biases = Tensor(out_shape, pad_dtype, "biases")
    conv_attrs = {"padding": pad_setting, "stride_w": 2, "stride_h": 2, "dilation_w_factor": 1, "dilation_h_factor": 1}
    conv_attrs["strides"] = (1, conv_attrs["stride_h"], conv_attrs["stride_w"], 1)
    conv_op = testutil.create_op(Op.Conv2DBias, [ofm, weights, biases], conv_ofm, conv_attrs)
    # NOTE(review): the pad OFM is added as an input a second time here,
    # mirroring the original helper — confirm this duplication is intended.
    conv_op.add_input_tensor(ofm)
    return pad_op
560
561
def test_constraint_pad_input_count():
    """PAD must have exactly two inputs; a third input makes it invalid."""
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0]],)
    assert support.is_operator_supported(op)
    op.add_input_tensor(op.inputs[0].clone())
    assert not support.is_operator_supported(op)
568
569
def test_constraint_padded_dimensions():
    """Only width and height may be padded; batch padding is rejected."""
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [1, 1], [0, 0]],)
    assert not support.is_operator_supported(op)
574
575
def test_constraint_pad_shape():
    """The padding tensor must have shape (4, 2); five rows are rejected."""
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],)
    assert not support.is_operator_supported(op)
580
581
def test_constraint_pad_none():
    """An empty padding tensor must be rejected."""
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[],)
    assert not support.is_operator_supported(op)
585
586
def test_constraint_pad_dtype():
    """The padding tensor dtype must be int32 or int64; int16 is rejected.

    Uses an otherwise-valid (4, 2) padding so that the unsupported dtype is
    the only constraint being violated. (Previously the padding also had an
    invalid fifth row, so this test did not isolate the dtype check.)
    """
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
        pad_dtype=DataType.int16,
    )
    assert not support.is_operator_supported(op)
596
597
def test_constraint_pad_consumer():
    """PAD is supported only with a consumer that can absorb the padding."""
    # Pad feeding a VALID-padded convolution: supported
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0]],)
    assert support.is_operator_supported(op)
    # Consumer convolution already uses SAME padding: not supported
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
        pad_setting=Padding.SAME,
    )
    assert not support.is_operator_supported(op)
    # Replacing the consumer with a concat: still not supported
    op_consumer = testutil.create_op_with_quant_tensors(Op.ConcatTFLite, [1, 1, 1, 4], [1, 1, 1, 8])
    op.ofm.consumer_list = [op_consumer]
    assert not support.is_operator_supported(op)
    # An average-pool consumer is rejected here as well
    op_consumer = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op_consumer.attrs = {
        "stride_w": 2,
        "stride_h": 2,
        "filter_width": 2,
        "filter_height": 2,
        "padding": Padding.VALID,
    }
    op.ofm.consumer_list = [op_consumer]
    assert not support.is_operator_supported(op)
622
623
# (top, left, bottom, right) padding cases for test_constraint_pad_size below;
# each tuple makes exactly one side use a pad of 2, which that test expects
# to be too large for the NPU to handle.
pad_invalid_size_test_data = [
    (2, 1, 1, 1),
    (1, 2, 1, 1),
    (1, 1, 2, 1),
    (1, 1, 1, 2),
]
630
631
@pytest.mark.parametrize("top, left, bottom, right", pad_invalid_size_test_data)
def test_constraint_pad_size(top, left, bottom, right):
    # Tests PAD operator with a padding that is too high to be handled by the NPU
    # NHWC: dim 1 (height) is padded by top/bottom and dim 2 (width) by
    # left/right, so the output grows by top + bottom in H and left + right
    # in W. (The previous out_shape had the H and W growth swapped, which was
    # inconsistent with the padding argument.)
    out_shape = [1, 11 + top + bottom, 11 + left + right, 1]
    padding = [[0, 0], [top, bottom], [left, right], [0, 0]]
    op = create_pad_op(in_shape=[1, 11, 11, 1], out_shape=out_shape, padding=padding,)
    assert not support.is_operator_supported(op)
639
640
# (top, left, kernel_size, expected-supported) cases for
# test_constraint_leading_pad_size below: with a big kernel, the top and left
# pad are expected to be supported only when they are a multiple of the stride.
leading_pad_test_data = [
    (2, 2, 11, True),
    (1, 2, 11, False),
    (2, 1, 11, False),
    (5, 2, 11, True),
]
647
648
@pytest.mark.parametrize("top, left, kernel_size, expected", leading_pad_test_data)
def test_constraint_leading_pad_size(top, left, kernel_size, expected):
    # Tests PAD operator with big kernel size; top and left pad must be multiple of stride
    # NHWC: dim 1 (height) grows by the top pad and dim 2 (width) by the left
    # pad, matching the padding argument below. (The previous out_shape had
    # the H and W growth swapped.)
    out_shape = [1, 11 + top, 11 + left, 1]
    padding = [[0, 0], [top, 0], [left, 0], [0, 0]]
    op = create_pad_op(in_shape=[1, 11, 11, 1], out_shape=out_shape, padding=padding, kernel_size=kernel_size)
    assert support.is_operator_supported(op) == expected
656
657
def create_strided_slice():
    """Build a StridedSlice operator that passes all supported-operator checks."""
    slice_op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
    slice_op.attrs.update({"begin_mask": 1, "end_mask": 9})
    # Sanity check: the baseline operator must be supported before tests mutate it
    assert support.is_operator_supported(slice_op)
    return slice_op
665
666
def test_constraint_stridedslice_input_count():
    # A StridedSlice with a surplus input tensor must be rejected
    slice_op = create_strided_slice()
    duplicate_input = slice_op.inputs[0].clone()
    slice_op.add_input_tensor(duplicate_input)
    assert not support.is_operator_supported(slice_op)
672
673
def test_constraint_stridedslice_inputs_const():
    # begin, end, stride values must not be None
    # inputs[1], [2] and [3] hold begin, end and stride respectively
    for input_idx in (1, 2, 3):
        slice_op = create_strided_slice()
        slice_op.inputs[input_idx].values = None
        assert not support.is_operator_supported(slice_op)
685
686
def test_constraint_stridedslice_stride_values():
    # A stride other than 1 (here 2 on one axis) is not supported
    slice_op = create_strided_slice()
    slice_op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(slice_op)
692
693
def test_constraint_ellipsis_mask():
    # A non-zero ellipsis mask is not supported
    slice_op = create_strided_slice()
    slice_op.attrs.update({"ellipsis_mask": 1})
    assert not support.is_operator_supported(slice_op)
699
700
def test_constraint_axis_masks():
    # A single non-zero axis mask (new_axis_mask or shrink_axis_mask) is ok
    slice_op = create_strided_slice()
    slice_op.attrs["new_axis_mask"] = 2
    assert support.is_operator_supported(slice_op)
    slice_op = create_strided_slice()
    slice_op.attrs["shrink_axis_mask"] = 3
    assert support.is_operator_supported(slice_op)
    # Having both masks non-zero at the same time is not supported
    slice_op.attrs["new_axis_mask"] = 2
    assert not support.is_operator_supported(slice_op)
712
713
def test_constraint_slice_ranges():
    # Examples where end offset <= begin offset
    # begin values moved past the end values
    slice_op = create_strided_slice()
    slice_op.inputs[1].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(slice_op)
    # end values moved before the begin values
    slice_op = create_strided_slice()
    slice_op.inputs[2].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(slice_op)
    # clearing begin_mask exposes the offending begin offsets
    slice_op = create_strided_slice()
    slice_op.attrs["begin_mask"] = 0
    assert not support.is_operator_supported(slice_op)
    # clearing end_mask exposes the offending end offsets
    slice_op = create_strided_slice()
    slice_op.attrs["end_mask"] = 0
    assert not support.is_operator_supported(slice_op)
728
729
def test_constraint_matching_inputs_types():
    # Both inputs must share one data type; ifm stays at the default (uint8)
    # while ifm2 is switched to int8, so the op must be rejected
    mul_op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    mul_op.ifm2.dtype = DataType.int8
    assert not support.is_operator_supported(mul_op)
735
736
def test_constraint_matching_signed():
    # signed inputs require output to also be signed
    mul_op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    mul_op.ofm.dtype = DataType.uint8
    assert not support.is_operator_supported(mul_op)
742
743
def test_constraint_unsigned_valid():
    # With unsigned inputs the output must either keep the same type...
    mul_op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(mul_op)  # default uint8 in, uint8 out
    # ...or be int32; other signed output types are rejected
    for rejected_dtype in (DataType.int8, DataType.int16):
        mul_op.ofm.dtype = rejected_dtype
        assert not support.is_operator_supported(mul_op)
    mul_op.ofm.dtype = DataType.int32
    assert support.is_operator_supported(mul_op)
756
757
def test_constraint_inputs_int32():
    # both inputs must be type int32
    shl_op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(shl_op)  # default uint8 inputs
    shl_op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(shl_op)
    # downgrading just one of the inputs breaks the constraint
    shl_op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(shl_op)
766
767
def test_constraint_output_int32():
    # output must be type int32
    shl_op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(shl_op)
    # a narrower output type is rejected
    shl_op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(shl_op)
774
775
def test_constraint_matching_quantization_parameters():
    # Input quantization parameters must match the output's
    mismatched_qp = QuantizationParameters()
    mismatched_qp.scale_f32 = np.float32(1.5)
    mismatched_qp.zero_point = 128

    def make_minimum_op():
        # Fresh Minimum op with default (matching) quantization everywhere
        return testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])

    # valid - all matching (uses default quant params)
    min_op = make_minimum_op()
    assert support.is_operator_supported(min_op)
    # invalid - ifm mismatch ofm
    min_op.ifm.quantization = mismatched_qp
    assert not support.is_operator_supported(min_op)
    # invalid - ifm2 mismatch ofm
    min_op = make_minimum_op()
    min_op.ifm2.quantization = mismatched_qp
    assert not support.is_operator_supported(min_op)
    # invalid - both ifm and ifm2 mismatch ofm
    min_op = make_minimum_op()
    min_op.ifm.quantization = mismatched_qp
    min_op.ifm2.quantization = mismatched_qp
    assert not support.is_operator_supported(min_op)
    # valid - all matching again once ofm uses the same parameters
    min_op.ofm.quantization = mismatched_qp
    assert support.is_operator_supported(min_op)
    # valid - unary form with no ifm2 at all
    min_op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], None, [1, 8, 8, 8])
    assert support.is_operator_supported(min_op)
Michael McGeagh65fd9982020-10-20 11:49:28 +0100800
801
def test_constraint_elemwise_batch_size():
    # Batch size >1 is only allowed when the tensors are at most 2D

    def _is_supported(shape, op_type, **kwargs):
        # Binary ops get a second input of the same shape; unary ops get None
        ifm2_shape = shape if op_type == Op.Add else None
        op = testutil.create_elemwise_op(op_type, "op", shape, ifm2_shape, shape, **kwargs)
        return support.is_operator_supported(op)

    # BINARY CASE: batch >1 ok for <=2D, batch must be 1 for >2D
    assert _is_supported([2, 2], Op.Add)
    assert _is_supported([1, 2, 2], Op.Add)
    assert not _is_supported([2, 2, 2], Op.Add)  # invalid case

    # UNARY CASE: the same rules apply
    assert _is_supported([2, 2], Op.CLZ, datatype=DataType.int32)
    assert _is_supported([1, 2, 2], Op.CLZ, datatype=DataType.int32)
    assert not _is_supported([2, 2, 2], Op.CLZ, datatype=DataType.int32)  # invalid case
824
825
def test_constraint_matching_either_shapes():
    # BINARY CASE
    # At least one of the two input shapes must equal the output shape
    add_op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [4, 4])
    assert support.is_operator_supported(add_op)  # ifm2 matches ofm
    add_op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [1, 4], [4, 4])
    assert support.is_operator_supported(add_op)  # ifm matches ofm
    add_op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [4, 4], [2, 2])
    assert not support.is_operator_supported(add_op)  # neither input matches
    add_op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 4, 16])
    assert not support.is_operator_supported(add_op)
    add_op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 4, 16])
    assert not support.is_operator_supported(add_op)

    # UNARY CASE
    # With no second input, the single ifm shape must equal the ofm shape
    clz_op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(clz_op)
    clz_op = testutil.create_elemwise_op(Op.CLZ, "op", [4, 4], None, [2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(clz_op)
846
847
def test_constraint_broadcast_shapes():
    # BINARY CASE
    # Only allow broadcast to 1 dim, for 1 rank index
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4], [1, 2, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 1, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    # Only allow broadcast to 1 dim, for 3 rank indexes
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 1, 1], [1, 4, 8, 16], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 8, 16], [1, 1, 1, 1], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    # One broadcast dim not 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 4, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 4], [1, 2, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    # OFM shape dim must match the largest ifm/ifm2 shape dim
    # (both operand orders are checked, as in the cases above; the second case
    # below was previously an exact duplicate of the first instead of the swap)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [1, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
874
875
def test_constraint_alpha_valid():
    # Alpha cannot be negative
    leaky_op = testutil.create_elemwise_op(Op.LeakyRelu, "op", [2, 2], None, [2, 2])
    leaky_op.attrs["alpha"] = 0
    assert support.is_operator_supported(leaky_op)  # zero is accepted
    leaky_op.attrs["alpha"] = -1
    assert not support.is_operator_supported(leaky_op)
Diqing Zhong189f7482021-01-26 12:12:51 +0100883
884
def test_constraint_hardswish_dtype():
    # HardSwish operator dtype should be int8 or uint8, and input dtype must match output
    # uint8 (the default) and int8 are both accepted
    hswish_op = testutil.create_op_with_quant_tensors(Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(hswish_op)
    hswish_op = testutil.create_op_with_quant_tensors(Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    assert support.is_operator_supported(hswish_op)

    # Wider types are rejected
    for rejected_dtype in (DataType.int16, DataType.uint16, DataType.int32):
        hswish_op = testutil.create_op_with_quant_tensors(
            Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8], datatype=rejected_dtype
        )
        assert not support.is_operator_supported(hswish_op)

    # Mismatched input/output dtypes are rejected
    in_tens = Tensor([1, 8, 8, 8], DataType.int8, "in")
    out_tens = Tensor([1, 8, 8, 8], DataType.uint8, "out")
    hswish_op = testutil.create_op(Op.HardSwish, [in_tens], out_tens)
    assert not support.is_operator_supported(hswish_op)