# Copyright (C) 2020-2022 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for tflite_supported_operators
import numpy as np

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import ActivationFunction
from ethosu.vela.operation import Op
from ethosu.vela.operation import Padding
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil
from ethosu.vela.tflite_supported_operators import TFLiteSupportedOperators

support = TFLiteSupportedOperators()
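# Shared constraint checker; each test below builds an Op and asserts the verdict
# of support.is_operator_supported(), which is True only when every constraint passes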


def test_constraint_tens_dtype():
    # Tensors can only be of type uint8, int8, int16 and int32
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_int32_ops():
    # For int32, only select op types are allowed:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_dimension():
    # Tensor dimensions must be in the inclusive range 1-65535
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_not_supp():
    # Quantization scale cannot be array-valued for elemwise ops
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_is_supp():
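    # Per-axis quantization is supported when it is attached to the conv bias tensor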
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op.bias.quantization = qp
    assert support.is_operator_supported(op)


def test_constraint_fc_output_2d_is_supp():
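    # A 2D OFM is supported for FullyConnected, regardless of IFM rank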
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [4, 8, 8, 4], [32, 32], weights_shape=[4, 8, 8, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1024], [16, 64], weights_shape=[1, 1024])
    assert support.is_operator_supported(op)


def test_constraint_faf():
    # Fused activation functions, if set, must be a valid op type
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = ActivationFunction(Op.Conv2D)
    assert not support.is_operator_supported(op)


def test_constraint_faf_ofm_dtype():
    # If fused activation function is present, OFM must be 8 or 16 bit
    shp = [1, 8, 8, 8]
    for dtype in [DataType.int8, DataType.uint8, DataType.int16, DataType.int32]:
        op = testutil.create_elemwise_op(Op.Add, "op", shp, shp, shp, datatype=dtype)
        op.activation = ActivationFunction(Op.Relu)
        expected = dtype.size_in_bytes() <= 2
        assert support.is_operator_supported(op) == expected, f"Data type: {dtype}"


def test_constraint_conv_pass():
    # First test a simple conv passes
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)


def test_constraint_stride_range():
    # Stride width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 0, "stride_h": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_range():
    # Dilation width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 0, "dilation_h_factor": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_height_range():
    # Dilated kernel height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_product_range():
    # Dilated kernel width x height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_type():
    # Weight tensor must be 8-bit
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_const():
    # Weight tensors must be constant; non-const weights are not supported
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(weights)
    assert not support.is_operator_supported(op)


def test_constraint_weights_limit():
    # Sum of weights has a limit
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)


def test_constraint_bias_type():
    # Bias must have a certain datatype
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_bias_40bit():
    # Bias values must not exceed 40 bits
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    bias.values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_batch_size():
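    # Convolutions with batch size != 1 are not supported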
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_depth_multiplier():
    # Valid. Depth multiplier is 1 so no further constraints
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # Invalid. Depth multiplier doesn't equal ofm channel
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Valid. Depth multiplier is equal to ofm channel
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)


def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_same():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_valid():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_filter_range():
    # Avg pool restrictions are dependent on padding:
    # SAME padding restricts both W and H to max 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": Padding.SAME}
    assert not support.is_operator_supported(op)
    # VALID padding limits are much larger
    op.attrs["padding"] = Padding.VALID
    assert support.is_operator_supported(op)


def test_constraint_filter_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts to 256 in filter height
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts to 256 in filter height
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_resize():
    for resize_op in Op.op_set(Op.is_resize_op):
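        # The loop covers every resize op type; supported cases are a unit-sized IFM,
        # IFM == OFM, and x2/x4/x8 upscaling (exact, or offset by one with align_corners)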
        # IFM W and H == 1
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 1, 1, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        assert support.is_operator_supported(op)

        # IFM == OFM
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 8, 8, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        assert support.is_operator_supported(op)

        # IFM x2 == OFM ; align_corners = False
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        assert support.is_operator_supported(op)

        # IFM x4 == OFM ; align_corners = False
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 16, 16, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [16, 16], np.int32))
        assert support.is_operator_supported(op)

        # IFM x8 == OFM ; align_corners = False
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 32, 32, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [32, 32], np.int32))
        assert support.is_operator_supported(op)

        # (IFM - 1) x2 == OFM - 1 ; align_corners = True
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 7, 7, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
        op.attrs["align_corners"] = True
        assert support.is_operator_supported(op)

        # (IFM - 1) x4 == OFM - 1 ; align_corners = True
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 13, 13, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [13, 13], np.int32))
        op.attrs["align_corners"] = True
        assert support.is_operator_supported(op)

        # (IFM - 1) x8 == OFM - 1 ; align_corners = True
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 25, 25, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [25, 25], np.int32))
        op.attrs["align_corners"] = True
        assert support.is_operator_supported(op)

        # Invalid case - upscale size
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 17, 17, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [17, 17], np.int32))
        assert not support.is_operator_supported(op)

        # Invalid case - upscale size with align corners
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 15, 15, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [15, 15], np.int32))
        op.attrs["align_corners"] = True
        assert not support.is_operator_supported(op)


def test_constraint_resize_size():
    for resize_op in Op.op_set(Op.is_resize_op):
        # Invalid case - size != ofm size
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
        assert not support.is_operator_supported(op)


def test_constraint_resize_attrs():
    for resize_op in Op.op_set(Op.is_resize_op):
        # Invalid case - both align corners and half-pixel centers
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        op.attrs["align_corners"] = True
        op.attrs["half_pixel_centers"] = True
        assert not support.is_operator_supported(op)


def test_constraint_resize_half_pixel_centers():
    for resize_op in Op.op_set(Op.is_resize_op):
        # Half-pixel centers are only supported for resize bilinear
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        op.attrs["half_pixel_centers"] = True
        if resize_op == Op.ResizeBilinear:
            assert support.is_operator_supported(op)
        else:
            assert not support.is_operator_supported(op)


def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)


def create_pad_op(
    in_shape,
    out_shape,
    padding,
    in_dtype=DataType.int8,
    out_dtype=DataType.int8,
    pad_dtype=DataType.int32,
):
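    # Helper building a PAD op: quantized input, constant paddings tensor, quantized output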
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, in_dtype, "in")
    in0.quantization = qp
    pad_tensor = create_const_tensor(name="pad", shape=list(np.shape(padding)), values=padding, dtype=pad_dtype)
    out = Tensor(out_shape, out_dtype, "out")
    out.quantization = qp.clone()
    op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
    return op


def test_constraint_padded_dimensions():
    # Incorrect padding dimensions, can only pad width and height
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [1, 1], [0, 0]],
    )
    assert not support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 0]],
    )
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 1]],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_shape():
    # PAD operator must be of shape (3,2) or (4,2)
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [0, 0]])
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_none():
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_dtype():
    # PAD operator dtype should be int32 or int64
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
        pad_dtype=DataType.int16,
    )
    assert not support.is_operator_supported(op)


def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
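    # Helper building a StridedSlice op with constant begin/end/strides input tensors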
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, DataType.uint8, "in")
    in0.quantization = qp
    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
    out = Tensor(out_shape, DataType.uint8, "out")
    out.quantization = qp
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)


def create_strided_slice():
    # Creates a valid strided slice operator with some valid inputs/outputs
    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
    op.attrs["begin_mask"] = 1
    op.attrs["end_mask"] = 9
    assert support.is_operator_supported(op)
    return op


def test_constraint_stridedslice_stride_values():
    # Unsupported strides
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(op)


def test_constraint_inputs_int32():
    # Both inputs must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_output_int32():
    # Output must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_matching_quantization_parameters():
    qp = QuantizationParameters()
    qp.scale_f32 = np.float32(1.5)
    qp.zero_point = 128
    # valid - all matching (uses default quant params)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # invalid - ifm mismatch ofm
    op.ifm.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - both ifm and ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = qp
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # valid - all matching
    op.ofm.quantization = qp
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], None, [1, 8, 8, 8])
    assert support.is_operator_supported(op)


def test_constraint_elemwise_batch_size():
    # BINARY CASE
    # Batch can be >1 if the shape is <=3D
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert support.is_operator_supported(op)
    # For shapes >3D, batch must be 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2])
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # Batch can be >1 if the shape is <=3D
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # For shapes >3D, batch must be 1
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2, 2], None, [1, 2, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2, 2], None, [2, 2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_broadcast_shapes():
    # BINARY CASE
    # Only allow broadcast to 1 dim, for 1 rank index
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4], [1, 2, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 1, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    # Only allow broadcast to 1 dim, for 3 rank indexes
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 1, 1], [1, 4, 8, 16], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 8, 16], [1, 1, 1, 1], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    # One broadcast dim not 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 4, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 4], [1, 2, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    # OFM shape dim must match the largest ifm/ifm2 shape dim
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)


def create_mean(input_shape, output_shape, axis, datatype, attrs):
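    # Helper building a Mean op; axis may be a single int or a list of reduction axes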
    ifm = Tensor(input_shape, datatype, "in")
    ifm.quantization = testutil.default_quant_params()
    ofm = Tensor(output_shape, datatype, "out")
    ofm.quantization = testutil.default_quant_params()
    if type(axis) is list:
        indices = create_const_tensor("indices", [len(axis)], DataType.int32, axis, np.uint8)
    elif type(axis) is int:
        indices = create_const_tensor("indices", [], DataType.int32, axis, np.uint8)
    op = testutil.create_op(Op.Mean, [ifm, indices], ofm, attrs)
    return op


def test_mean_hw_product():
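    # The H x W product of the reduced axes is limited: 64 x 64 is accepted, 65 x 64 is not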
    op = create_mean([1, 64, 64, 16], [1, 16], [1, 2], DataType.uint8, {})
    assert support.is_operator_supported(op)
    op = create_mean([1, 65, 64, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)


def test_mean_hw_product_int8():
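    # For int8 with keep_dims the limit is tighter: 16 x 16 passes, 16 x 17 does not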
    op = create_mean([1, 16, 16, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert support.is_operator_supported(op)
    op = create_mean([1, 16, 17, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)


def test_mean_hw_product_avgpool():
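    # A Mean that maps to an AvgPool (uint8, keep_dims=False) accepts a larger H x W product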
    op = create_mean([1, 200, 200, 16], [1, 16], [1, 2], DataType.uint8, {"keep_dims": False})
    assert support.is_operator_supported(op)
    op = create_mean([1, 200, 200, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)