# SPDX-FileCopyrightText: Copyright 2020-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for tflite_supported_operators
import numpy as np

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import ActivationFunction
from ethosu.vela.operation import Op
from ethosu.vela.operation import Padding
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil
from ethosu.vela.tflite_supported_operators import TFLiteSupportedOperators

support = TFLiteSupportedOperators()


def test_constraint_tens_dtype():
    # Tensors can only be of type uint8, int8, int16 and int32
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)
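    # Conversely, the same op with a supported dtype (int8 here, using
    # testutil's default quantization parameters) is expected to pass.
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    assert support.is_operator_supported(op)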


def test_constraint_tens_int32_ops():
    # For int32, only select op types are allowed:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_dimension():
    # Tensor dimensions can only be in the inclusive range 1-65535
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_not_supp():
    # Quantization scale cannot be array-valued for elemwise ops
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_is_supp():
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
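    # Per-axis quantization is allowed on the bias input of a convolution, so
    # attaching array-valued scales/zero points to the bias should keep the op
    # supported.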
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op.bias.quantization = qp
    assert support.is_operator_supported(op)


def test_constraint_fc_output_2d_is_supp():
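    # A FullyConnected op with a 2D OFM is supported, whether the IFM is 4D or
    # already 2D.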
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [4, 8, 8, 4], [32, 32], weights_shape=[4, 8, 8, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1024], [16, 64], weights_shape=[1, 1024])
    assert support.is_operator_supported(op)


def test_constraint_faf():
    # Fused activation functions, if set, must be a valid op type
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = ActivationFunction(Op.Conv2D)
    assert not support.is_operator_supported(op)


def test_constraint_faf_ofm_dtype():
    # If a fused activation function is present, the OFM must be 8 or 16 bit
    shp = [1, 8, 8, 8]
    for dtype in [DataType.int8, DataType.uint8, DataType.int16, DataType.int32]:
        op = testutil.create_elemwise_op(Op.Add, "op", shp, shp, shp, datatype=dtype)
        op.activation = ActivationFunction(Op.Relu)
        expected = dtype.size_in_bytes() <= 2
        assert support.is_operator_supported(op) == expected, f"Data type: {dtype}"


def test_constraint_conv_pass():
    # First check that a simple convolution passes
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)


def test_constraint_stride_range():
    # Stride width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 0, "stride_h": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_height_range():
    # Dilated kernel height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_product_range():
    # Dilated kernel width x height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_type():
    # Weight tensor must be 8-bit
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_const():
    # The weight tensor must be constant; non-const weights are rejected
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(weights)
    assert not support.is_operator_supported(op)


def test_constraint_weights_limit():
    # Sum of weights has a limit
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
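    # Assumption: the zero-point-adjusted weight sum is capped at 127 * 65536,
    # so a zero point just past that bound trips the constraint.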
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)


def test_constraint_bias_type():
    # Bias must have a supported datatype (uint8 is not)
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_bias_40bit():
    # Bias values must not exceed 40 bits
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    bias.values = np.array([0x01FF_FFFF_FFFF])
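    # 0x01FF_FFFF_FFFF (2**41 - 1) needs 41 bits, one more than the limit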
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_batch_size():
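    # The IFM batch size must be 1 for convolutions, so a batch of 2 is rejected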
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_depth_multiplier():
    # Valid. Depth multiplier is 1 so no further constraints
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # Invalid. Depth multiplier doesn't equal the ofm channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Valid. Depth multiplier is equal to the ofm channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)


def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_same():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
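    # With SAME padding the OFM dimensions must equal the IFM dimensions
    # multiplied by the stride: 2 = 1 * 2 above, whereas a 4x4 OFM from a
    # 1x1 IFM with stride 2 is rejected below.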
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_valid():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
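    # With VALID padding the expected OFM size is (IFM - 1) * stride + kernel:
    # (1 - 1) * 2 + 4 = 4 holds above, while the 2x2 kernel below gives 2, not 4.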
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_filter_range():
    # Avg pool restrictions are dependent on padding:
    # SAME padding restricts both W and H to max 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": Padding.SAME}
    assert not support.is_operator_supported(op)
    # VALID padding limits are much larger
    op.attrs["padding"] = Padding.VALID
    assert support.is_operator_supported(op)


def test_constraint_filter_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts to 256 in filter height
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts to 256 in filter height
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_resize():
    for resize_op in Op.op_set(Op.is_resize_op):
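        # The same upscaling rules are checked for every resize variant (e.g.
        # bilinear and nearest-neighbour resize ops).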
        # IFM W and H == 1
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 1, 1, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        assert support.is_operator_supported(op)

        # IFM == OFM
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 8, 8, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        assert support.is_operator_supported(op)

        # IFM x2 == OFM ; align_corners = False
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        assert support.is_operator_supported(op)

        # IFM x4 == OFM ; align_corners = False
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 16, 16, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [16, 16], np.int32))
        assert support.is_operator_supported(op)

        # IFM x8 == OFM ; align_corners = False
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 32, 32, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [32, 32], np.int32))
        assert support.is_operator_supported(op)

        # (IFM - 1) x2 == (OFM - 1) ; align_corners = True
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 7, 7, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
        op.attrs["align_corners"] = True
        assert support.is_operator_supported(op)

        # (IFM - 1) x4 == (OFM - 1) ; align_corners = True
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 13, 13, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [13, 13], np.int32))
        op.attrs["align_corners"] = True
        assert support.is_operator_supported(op)

        # (IFM - 1) x8 == (OFM - 1) ; align_corners = True
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 25, 25, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [25, 25], np.int32))
        op.attrs["align_corners"] = True
        assert support.is_operator_supported(op)

        # Invalid case - 4x4 cannot be upscaled to 17x17 by a supported factor
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 17, 17, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [17, 17], np.int32))
        assert not support.is_operator_supported(op)

        # Invalid case - with align_corners, (15 - 1) is not a supported multiple of (4 - 1)
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 15, 15, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [15, 15], np.int32))
        op.attrs["align_corners"] = True
        assert not support.is_operator_supported(op)


def test_constraint_resize_size():
    for resize_op in Op.op_set(Op.is_resize_op):
        # Invalid case - size != ofm size
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
        assert not support.is_operator_supported(op)


def test_constraint_resize_attrs():
    for resize_op in Op.op_set(Op.is_resize_op):
        # Invalid case - both align corners and half-pixel centers
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        op.attrs["align_corners"] = True
        op.attrs["half_pixel_centers"] = True
        assert not support.is_operator_supported(op)


def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)


def create_pad_op(
    in_shape,
    out_shape,
    padding,
    in_dtype=DataType.int8,
    out_dtype=DataType.int8,
    pad_dtype=DataType.int32,
):
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, in_dtype, "in")
    in0.quantization = qp
    pad_tensor = create_const_tensor(name="pad", shape=list(np.shape(padding)), values=padding, dtype=pad_dtype)
    out = Tensor(out_shape, out_dtype, "out")
    out.quantization = qp.clone()
    op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
    return op


def test_constraint_padded_dimensions():
    # Incorrect padding dimensions; only width and height can be padded
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [1, 1], [0, 0]],
    )
    assert not support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 0]],
    )
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 1]],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_shape():
    # The padding tensor must have shape (3, 2) or (4, 2)
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [0, 0]])
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_none():
    # An empty padding tensor is not supported
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_dtype():
    # The padding tensor dtype must be int32 or int64
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
        pad_dtype=DataType.int16,
    )
    assert not support.is_operator_supported(op)


def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, DataType.uint8, "in")
    in0.quantization = qp
    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
    out = Tensor(out_shape, DataType.uint8, "out")
    out.quantization = qp
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)


def create_strided_slice():
    # Creates a valid strided slice operator with some valid inputs/outputs
    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
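    # Bit i of begin_mask/end_mask makes the corresponding begin/end offset be
    # ignored: begin_mask = 1 masks the out-of-range begin offset 127 for dim 0,
    # and end_mask = 9 (bits 0 and 3) masks the end offsets for dims 0 and 3.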
    op.attrs["begin_mask"] = 1
    op.attrs["end_mask"] = 9
    assert support.is_operator_supported(op)
    return op


def test_constraint_stridedslice_stride_values():
    # Unsupported strides; only stride values of 1 are accepted
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(op)


def test_constraint_inputs_int32():
    # Both inputs must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_output_int32():
    # The output must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_matching_quantization_parameters():
    qp = QuantizationParameters()
    qp.scale_f32 = np.float32(1.5)
    qp.zero_point = 128
    # valid - all matching (uses default quant params)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # invalid - ifm mismatches ofm
    op.ifm.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - ifm2 mismatches ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - both ifm and ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = qp
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # valid - all matching
    op.ofm.quantization = qp
    assert support.is_operator_supported(op)
    # valid - no ifm2 means there is nothing to mismatch
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], None, [1, 8, 8, 8])
    assert support.is_operator_supported(op)


def test_constraint_elemwise_batch_size():
    # BINARY CASE
    # Batch can be > 1 for tensors with up to 3 dimensions
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert support.is_operator_supported(op)
    # For tensors with more than 3 dimensions, the batch must be 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2])
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # Batch can be > 1 for tensors with up to 3 dimensions
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # For tensors with more than 3 dimensions, the batch must be 1
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2, 2], None, [1, 2, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2, 2], None, [2, 2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_broadcast_shapes():
    # BINARY CASE
    # Only allow broadcast to 1 dim, for 1 rank index
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4], [1, 2, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 1, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    # Only allow broadcast to 1 dim, for 3 rank indexes
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 1, 1], [1, 4, 8, 16], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 8, 16], [1, 1, 1, 1], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    # One broadcast dim not 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 4, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 4], [1, 2, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    # Each OFM dim must match the largest of the corresponding ifm/ifm2 dims
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)


def create_mean(input_shape, output_shape, axis, datatype, attrs):
    ifm = Tensor(input_shape, datatype, "in")
    ifm.quantization = testutil.default_quant_params()
    ofm = Tensor(output_shape, datatype, "out")
    ofm.quantization = testutil.default_quant_params()
    if type(axis) is list:
        indices = create_const_tensor("indices", [len(axis)], DataType.int32, axis, np.uint8)
    elif type(axis) is int:
        indices = create_const_tensor("indices", [], DataType.int32, axis, np.uint8)
    op = testutil.create_op(Op.Mean, [ifm, indices], ofm, attrs)
    return op


def test_mean_hw_product():
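    # The product of the reduced H and W axes is limited: 64 * 64 = 4096 is
    # accepted for uint8, while 65 * 64 with an int8, keep_dims MEAN is not.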
    op = create_mean([1, 64, 64, 16], [1, 16], [1, 2], DataType.uint8, {})
    assert support.is_operator_supported(op)
    op = create_mean([1, 65, 64, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)


def test_mean_hw_product_avgpool():
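    # Assumption: when MEAN can be lowered to an average pool (keep_dims=False,
    # uint8 here), a larger H x W product such as 200 * 200 is accepted than in
    # the keep_dims int8 case.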
    op = create_mean([1, 200, 200, 16], [1, 16], [1, 2], DataType.uint8, {"keep_dims": False})
    assert support.is_operator_supported(op)
    op = create_mean([1, 200, 200, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)