# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for supported_operators
import numpy as np

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import Op
from ethosu.vela.supported_operators import SupportedOperators
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil

support = SupportedOperators()


def test_constraint_tens_no_dynamic():
    # Tensors cannot be dynamic (no shape, not a scalar)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [])
    assert not support.is_operator_supported(op)


def test_constraint_tens_defined_shape():
    # Tensor shapes cannot contain None
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, None, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)


def test_constraint_tens_output_scalar():
    # Scalar output is not allowed at all:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [])
    op.ofm.values = 0.5
    assert not support.is_operator_supported(op)


def test_constraint_tens_input_scalar():
    # Shapeless input is allowed if it's of a certain type:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Invalid shapeless input due to op type:
    op = testutil.create_op_with_quant_tensors(Op.Relu, [], [1, 8, 8, 8])
    op.ifm.values = 0.5
    assert not support.is_operator_supported(op)


def test_constraint_tens_shape_size():
    # Tensors cannot be > 4D
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 1, 8, 8, 8], [1, 1, 8, 8, 8])
    assert not support.is_operator_supported(op)


def test_constraint_tens_dtype():
    # Tensors can only be of type uint8, int8, int16 and int32
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_int32_ops():
    # For int32, only select op types are allowed:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_dimension():
    # Tensor dimensions can only be in the inclusive range 1-65535
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)


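# A hedged companion to the test above: assuming the boundary value 65535
# itself is accepted (the range is stated as inclusive), this sketch checks
# the positive case. Illustrative only, not part of the original suite.
def test_constraint_tens_dimension_valid():
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 65535], [1, 8, 8, 65535])
    assert support.is_operator_supported(op)

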
def test_constraint_tens_quant_none_check():
    # Tensors must have quantization parameters
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm2_quant=None)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_scale():
    # Quantization scale cannot be infinite
    qp = QuantizationParameters()
    qp.zero_point = 0
    qp.scale_f32 = np.inf
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_faf():
    # Fused activation functions, if set, must be a valid op type
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = Op.Conv2D
    assert not support.is_operator_supported(op)


def test_constraint_conv_pass():
    # First test a simple conv passes
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)


def test_constraint_stride_type():
    # Stride width and height must be integer types
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1.5, "stride_h": "1"}
    assert not support.is_operator_supported(op)


def test_constraint_stride_range():
    # Stride width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 0, "stride_h": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_type():
    # Dilation width and height must be integer types
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 1.5, "dilation_h_factor": "1"}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_range():
    # Dilation width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 0, "dilation_h_factor": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_height_range():
    # Dilated kernel height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_product_range():
    # Dilated kernel width x height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_type():
    # Weight tensor must be 8-bit
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_const():
    # Weight tensors must be constant
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(weights)
    assert not support.is_operator_supported(op)


def test_constraint_weights_limit():
    # Sum of weights has a limit
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
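    # Presumably the limit is 127 * 2**16 on the zero-point-adjusted weight
    # sum, so this zero point pushes a single weight just over it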
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)


def test_constraint_bias_type():
    # Bias must have a certain datatype
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_bias_40bit():
    # Bias must not exceed 40-bit
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
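    # 0x01FF_FFFF_FFFF == 2**41 - 1, which does not fit in 40 bits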
    bias.quant_values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_batch_size():
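    # The batch size (N) of the IFM must be 1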
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_quant_scale_inf():
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
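    # 1e-39 is a denormal float32 whose reciprocal overflows to infinity,
    # which the scale check presumably rejects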
    op.ofm.quantization.scale_f32 = np.float32(1e-39)
    assert not support.is_operator_supported(op)


def test_constraint_depth_multiplier():
    # Valid. Depth multiplier is 1 so no further constraints
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # Invalid. Depth multiplier doesn't equal the ofm channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Valid. Depth multiplier is equal to the ofm channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)


def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_same():
    # Valid: with SAME padding, OFM dims must equal IFM dims multiplied by the strides
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid: a 4x4 OFM from a 1x1 IFM with stride 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_valid():
    # Valid: with VALID padding, OFM dims must equal
    # IFM dims * stride + max(kernel dims - stride, 0)
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid: a 2x2 kernel at stride 2 gives a 2x2 OFM, not 4x4
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_matching_in_out_types():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 2, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # Invalid. Datatypes for ifm and ofm must match (default uint8)
    op.ifm.dtype = DataType.int8
    assert not support.is_operator_supported(op)


def test_constraint_filter_type():
    # Filter width/height must be integers
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2.5, "filter_height": "2", "padding": b"SAME"}
    assert not support.is_operator_supported(op)


def test_constraint_filter_range():
    # Avg pool restrictions are dependent on padding:
    # SAME padding restricts both W and H to max 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": b"SAME"}
    assert not support.is_operator_supported(op)
    # VALID padding limits are much larger
    op.attrs["padding"] = b"VALID"
    assert support.is_operator_supported(op)


def test_constraint_filter_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": b"VALID"}
    assert support.is_operator_supported(op)
    # VALID padding restricts to 256 in filter height
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": b"VALID"}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # Restricts to 256 in filter height
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = b"VALID"
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # Restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = b"VALID"
    assert not support.is_operator_supported(op)


def test_constraint_resize():
    # IFM W and H == 1
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM == OFM
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 == OFM ; align_corners = False
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 - 1 == OFM ; align_corners = True
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
    op.attrs["align_corners"] = True
    assert support.is_operator_supported(op)
    # Invalid cases
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 20, 20, 8])
    assert not support.is_operator_supported(op)
    op.attrs["align_corners"] = True
    assert not support.is_operator_supported(op)


def test_constraint_matching_shapes():
    # Softmax requires the ifm and ofm shapes to match
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 2, 2, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    assert support.is_operator_supported(op)


def test_constraint_splitv_inferred():
    # SplitV requires a maximum of one inferred shape (-1)
    qp = testutil.default_quant_params()
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], np.int16, quantization=qp)
    op.add_input_tensor(sizes)
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], np.int16, quantization=qp)
    op.add_input_tensor(sizes)
    assert support.is_operator_supported(op)


def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)


def test_constraint_axis_exists():
    # Missing axis attribute
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    assert not support.is_operator_supported(op)


def test_constraint_axis_valid():
    # Invalid axis attribute
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 7
    assert not support.is_operator_supported(op)


def test_constraint_matching_dimensionality():
    # Mismatching dimensionality: 4D+2D=4D
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)


def test_constraint_valid_dimensions():
    # Mismatching dimension value:
    # ifm2 has w and h as 2, which is not the axis to concat and doesn't match ifm1 or ofm
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 2, 2, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)


def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
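    # Helper: builds a StridedSlice op with its four inputs (ifm, begin, end,
    # strides), with all strides set to 1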
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, DataType.uint8, "in")
    in0.quantization = qp
    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
    out = Tensor(out_shape, DataType.uint8, "out")
    out.quantization = qp
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)


def create_strided_slice():
    # Creates a valid strided slice operator with some valid inputs/outputs
    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
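    # begin_mask=1 masks out the begin offset for dim 0; end_mask=9 (0b1001)
    # masks out the end offsets for dims 0 and 3, so the out-of-range values
    # above are ignored (standard TFLite StridedSlice mask semantics)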
    op.attrs["begin_mask"] = 1
    op.attrs["end_mask"] = 9
    assert support.is_operator_supported(op)
    return op


def test_constraint_stridedslice_input_count():
    # Wrong number of input tensors
    op = create_strided_slice()
    op.add_input_tensor(op.inputs[0].clone())
    assert not support.is_operator_supported(op)


def test_constraint_stridedslice_inputs_const():
    # begin, end, stride values must not be None
    op = create_strided_slice()
    op.inputs[1].values = None
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[2].values = None
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[3].values = None
    assert not support.is_operator_supported(op)


def test_constraint_stridedslice_tens_size_matches():
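    # Assumption from the case below: begin/end/stride must have one entry
    # per ifm dimension; eight entries for a 4D ifm is rejected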
    op = create_strided_slice()
    op.inputs[1].values = [1, 1, 1, 1, 1, 1, 1, 1]
    assert not support.is_operator_supported(op)


def test_constraint_stridedslice_stride_values():
    # Unsupported strides: only strides of 1 are supported
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(op)


def test_constraint_ellipsis_mask():
    # Unsupported ellipsis mask
    op = create_strided_slice()
    op.attrs["ellipsis_mask"] = 1
    assert not support.is_operator_supported(op)


def test_constraint_axis_masks():
    op = create_strided_slice()
    # Setting one of new_axis_mask/shrink_axis_mask to non-zero is ok
    op.attrs["new_axis_mask"] = 2
    assert support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["shrink_axis_mask"] = 3
    assert support.is_operator_supported(op)
    # But setting both to non-zero is not supported
    op.attrs["new_axis_mask"] = 2
    assert not support.is_operator_supported(op)


def test_constraint_slice_ranges():
    # Examples where end offset <= begin offset
    op = create_strided_slice()
    op.inputs[1].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[2].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["begin_mask"] = 0
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["end_mask"] = 0
    assert not support.is_operator_supported(op)


def test_constraint_matching_inputs_types():
    # input data types must match (default is uint8)
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.dtype = DataType.int8
    assert not support.is_operator_supported(op)


def test_constraint_matching_signed():
    # signed inputs require output to also be signed
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    op.ofm.dtype = DataType.uint8
    assert not support.is_operator_supported(op)


def test_constraint_unsigned_valid():
    # unsigned inputs require output to be either:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    # the same (default uint8)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int8
    assert not support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)
    # or int32
    op.ofm.dtype = DataType.int32
    assert support.is_operator_supported(op)


def test_constraint_inputs_int32():
    # both inputs must be type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_output_int32():
    # output must be type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_matching_quantization_parameters():
    qp = QuantizationParameters()
    qp.scale_f32 = np.float32(1.5)
    qp.zero_point = 128
    # valid - all matching (uses default quant params)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # invalid - ifm mismatches ofm
    op.ifm.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - ifm2 mismatches ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - both ifm and ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = qp
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # valid - all matching
    op.ofm.quantization = qp
    assert support.is_operator_supported(op)


def test_constraint_elemwise_batch_size():
    # BINARY CASE
    # Batch can be >1 if the tensors are <=2D
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [2, 2], [2, 2])
    assert support.is_operator_supported(op)
    # For tensors >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2], [1, 2, 2], [1, 2, 2])
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # Batch can be >1 if the tensors are <=2D
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # For tensors >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2], None, [1, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_matching_either_shapes():
    # BINARY CASE
    # At least one ifm shape must match ofm's shape
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [4, 4], [2, 2])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [2, 2], [2, 2])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [4, 4], [2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # No second input so this is treated the same as requiring ifm shape to match ofm shape
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.CLZ, "op", [4, 4], None, [2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_alpha_valid():
    # Alpha cannot be negative
    op = testutil.create_elemwise_op(Op.LeakyRelu, "op", [2, 2], None, [2, 2])
    op.attrs["alpha"] = 0
    assert support.is_operator_supported(op)
    op.attrs["alpha"] = -1
    assert not support.is_operator_supported(op)