# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for TFLite supported operators
import numpy as np

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import ActivationFunction
from ethosu.vela.operation import Op
from ethosu.vela.operation import Padding
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil
from ethosu.vela.tflite_supported_operators import TFLiteSupportedOperators

support = TFLiteSupportedOperators()


def test_constraint_tens_dtype():
    # Tensors can only be of type uint8, int8, int16 and int32
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)


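# Hedged companion sketch (an addition, not from the original suite): assuming
# Relu itself is supported, an int8 IFM/OFM is in the supported-type list above
# and should pass the dtype constraint.
def test_constraint_tens_dtype_supported_sketch():
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    assert support.is_operator_supported(op)

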
def test_constraint_tens_int32_ops():
    # For int32, only select op types are allowed:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


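# Hedged companion sketch (an addition, grounded in the CLZ cases further down):
# CLZ is one of the int32-only op types, so an int32 CLZ should be accepted.
def test_constraint_tens_int32_ops_clz_sketch():
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 8, 8, 8], None, [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)

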
def test_constraint_tens_dimension():
    # Tensor dimensions must lie in the inclusive range 1-65535
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_not_supp():
    # Quantization scale cannot be array-valued for elemwise ops
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_is_supp():
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op.bias.quantization = qp
    assert support.is_operator_supported(op)


def test_constraint_fc_output_2d_is_supp():
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [4, 8, 8, 4], [32, 32], weights_shape=[4, 8, 8, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1024], [16, 64], weights_shape=[1, 1024])
    assert support.is_operator_supported(op)


def test_constraint_faf():
    # Fused activation functions, if set, must be a valid op type
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = ActivationFunction(Op.Conv2D)
    assert not support.is_operator_supported(op)


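# Hedged companion sketch (an addition, grounded in the OFM-dtype cases below,
# which fuse Op.Relu): a Relu fused activation is a valid op type and should pass.
def test_constraint_faf_valid_sketch():
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = ActivationFunction(Op.Relu)
    assert support.is_operator_supported(op)

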
def test_constraint_faf_ofm_dtype():
    # If a fused activation function is present, the OFM must be 8 or 16 bit
    shp = [1, 8, 8, 8]
    for dtype in [DataType.int8, DataType.uint8, DataType.int16, DataType.int32]:
        op = testutil.create_elemwise_op(Op.Add, "op", shp, shp, shp, datatype=dtype)
        op.activation = ActivationFunction(Op.Relu)
        expected = dtype.size_in_bytes() <= 2
        assert support.is_operator_supported(op) == expected, f"Data type: {dtype}"


def test_constraint_conv_pass():
    # First test that a simple conv passes
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)


def test_constraint_stride_range():
    # Stride width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 0, "stride_h": 20}
    assert not support.is_operator_supported(op)


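# Hedged companion sketch (an addition; assumes strides of 2 lie inside the
# supported range): the same convolution with valid strides should pass.
def test_constraint_stride_range_valid_sketch():
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2}
    assert support.is_operator_supported(op)

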
def test_constraint_dilation_range():
    # Dilation width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 0, "dilation_h_factor": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_height_range():
    # Dilated kernel height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_product_range():
    # Dilated kernel width x height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


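# Hedged boundary sketch (an addition; assumes 64 is the dilated-kernel-height
# limit and 64 x 64 the area limit implied by the two failing cases above).
def test_constraint_dilated_range_valid_sketch():
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)

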
def test_constraint_weights_type():
    # Weight tensor must be 8-bit
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_const():
    # Weight tensor cannot be non-constant
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(weights)
    assert not support.is_operator_supported(op)


def test_constraint_weights_limit():
    # The sum of the weights has a limit
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)


def test_constraint_bias_type():
    # Bias must have a certain datatype
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_bias_40bit():
    # Bias values must not exceed 40 bits
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    bias.values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_batch_size():
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_depth_multiplier():
    # Valid. Depth multiplier is 1, so no further constraints apply
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # Invalid. Depth multiplier doesn't equal the OFM channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Valid. Depth multiplier is equal to the OFM channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)


def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_same():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_valid():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_filter_range():
    # Avg pool restrictions are dependent on padding:
    # SAME padding restricts both W and H to a max of 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": Padding.SAME}
    assert not support.is_operator_supported(op)
    # VALID padding limits are much larger
    op.attrs["padding"] = Padding.VALID
    assert support.is_operator_supported(op)


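# Hedged boundary sketch (an addition; assumes 8 is the SAME-padding filter
# limit referenced above): an 8 x 8 filter should still be accepted.
def test_constraint_filter_range_valid_sketch():
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 8, "filter_height": 8, "padding": Padding.SAME}
    assert support.is_operator_supported(op)

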
def test_constraint_filter_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts the filter height to 256
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts the filter height to 256
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # It doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # It doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_resize():
    for resize_op in Op.op_set(Op.is_resize_op):
        # IFM W and H == 1
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 1, 1, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        assert support.is_operator_supported(op)

        # IFM == OFM
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 8, 8, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        assert support.is_operator_supported(op)

        # IFM x2 == OFM ; align_corners = False
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        assert support.is_operator_supported(op)

        # IFM x4 == OFM ; align_corners = False
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 16, 16, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [16, 16], np.int32))
        assert support.is_operator_supported(op)

        # IFM x8 == OFM ; align_corners = False
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 32, 32, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [32, 32], np.int32))
        assert support.is_operator_supported(op)

        # (IFM - 1) x2 == OFM - 1 ; align_corners = True
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 7, 7, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
        op.attrs["align_corners"] = True
        assert support.is_operator_supported(op)

        # (IFM - 1) x4 == OFM - 1 ; align_corners = True
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 13, 13, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [13, 13], np.int32))
        op.attrs["align_corners"] = True
        assert support.is_operator_supported(op)

        # (IFM - 1) x8 == OFM - 1 ; align_corners = True
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 25, 25, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [25, 25], np.int32))
        op.attrs["align_corners"] = True
        assert support.is_operator_supported(op)

        # Invalid case - upscale size
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 17, 17, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [17, 17], np.int32))
        assert not support.is_operator_supported(op)

        # Invalid case - upscale size with align corners
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 15, 15, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [15, 15], np.int32))
        op.attrs["align_corners"] = True
        assert not support.is_operator_supported(op)


def test_constraint_resize_size():
    for resize_op in Op.op_set(Op.is_resize_op):
        # Invalid case - size != ofm size
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
        assert not support.is_operator_supported(op)


def test_constraint_resize_attrs():
    for resize_op in Op.op_set(Op.is_resize_op):
        # Invalid case - both align corners and half-pixel centers
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        op.attrs["align_corners"] = True
        op.attrs["half_pixel_centers"] = True
        assert not support.is_operator_supported(op)


def test_constraint_resize_half_pixel_centers():
    for resize_op in Op.op_set(Op.is_resize_op):
        # Invalid case - half-pixel centers (not supported)
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
        op.attrs["half_pixel_centers"] = True
        assert not support.is_operator_supported(op)


def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)


def create_pad_op(
    in_shape,
    out_shape,
    padding,
    in_dtype=DataType.int8,
    out_dtype=DataType.int8,
    pad_dtype=DataType.int32,
):
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, in_dtype, "in")
    in0.quantization = qp
    pad_tensor = create_const_tensor(name="pad", shape=list(np.shape(padding)), values=padding, dtype=pad_dtype)
    out = Tensor(out_shape, out_dtype, "out")
    out.quantization = qp.clone()
    op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
    return op


def test_constraint_padded_dimensions():
    # Incorrect padding dimensions; only width and height can be padded
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [1, 1], [0, 0]],
    )
    assert not support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 0]],
    )
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 1]],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_shape():
    # The PAD operator's padding tensor must be of shape (3, 2) or (4, 2)
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [0, 0]])
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_none():
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_dtype():
    # The PAD operator's padding dtype should be int32 or int64
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
        pad_dtype=DataType.int16,
    )
    assert not support.is_operator_supported(op)


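# Hedged companion sketch (an addition; the comment above lists int64 as an
# accepted padding dtype): the valid (3, 2) padding should pass with int64.
def test_constraint_pad_dtype_int64_sketch():
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 0]],
        pad_dtype=DataType.int64,
    )
    assert support.is_operator_supported(op)

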
def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, DataType.uint8, "in")
    in0.quantization = qp
    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
    out = Tensor(out_shape, DataType.uint8, "out")
    out.quantization = qp
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)


def create_strided_slice():
    # Creates a valid strided slice operator with some valid inputs/outputs
    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
    op.attrs["begin_mask"] = 1
    op.attrs["end_mask"] = 9
    assert support.is_operator_supported(op)
    return op


def test_constraint_stridedslice_stride_values():
    # Unsupported strides
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(op)


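# Hedged companion sketch (an addition): unit strides are what the helper above
# creates, so restoring them should make the same operator pass again.
def test_constraint_stridedslice_stride_values_valid_sketch():
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 1, 1]
    assert support.is_operator_supported(op)

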
def test_constraint_inputs_int32():
    # Both inputs must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_output_int32():
    # Output must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_matching_quantization_parameters():
    qp = QuantizationParameters()
    qp.scale_f32 = np.float32(1.5)
    qp.zero_point = 128
    # Valid - all matching (uses default quant params)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Invalid - ifm mismatches ofm
    op.ifm.quantization = qp
    assert not support.is_operator_supported(op)
    # Invalid - ifm2 mismatches ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # Invalid - both ifm and ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = qp
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # Valid - all matching
    op.ofm.quantization = qp
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], None, [1, 8, 8, 8])
    assert support.is_operator_supported(op)


def test_constraint_elemwise_batch_size():
    # BINARY CASE
    # Batch can be >1 if dims is <=2D
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [2, 2], [2, 2])
    assert support.is_operator_supported(op)
    # For dims >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2], [1, 2, 2], [1, 2, 2])
    assert support.is_operator_supported(op)
    # Invalid case
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # Batch can be >1 if dims is <=2D
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # For dims >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2], None, [1, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # Invalid case
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_broadcast_shapes():
    # BINARY CASE
    # Only allow broadcast to 1 dim, for 1 rank index
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4], [1, 2, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 1, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    # Only allow broadcast to 1 dim, for 3 rank indexes
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 1, 1], [1, 4, 8, 16], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 8, 16], [1, 1, 1, 1], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    # One broadcast dim not 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 4, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 4], [1, 2, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    # OFM shape dim must match the largest ifm/ifm2 shape dim
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)


def create_mean(input_shape, output_shape, axis, datatype, attrs):
    ifm = Tensor(input_shape, datatype, "in")
    ifm.quantization = testutil.default_quant_params()
    ofm = Tensor(output_shape, datatype, "out")
    ofm.quantization = testutil.default_quant_params()
    if type(axis) is list:
        indices = create_const_tensor("indices", [len(axis)], DataType.int32, axis, np.uint8)
    elif type(axis) is int:
        indices = create_const_tensor("indices", [], DataType.int32, axis, np.uint8)
    op = testutil.create_op(Op.Mean, [ifm, indices], ofm, attrs)
    return op


def test_mean_hw_product():
    op = create_mean([1, 64, 64, 16], [1, 16], [1, 2], DataType.uint8, {})
    assert support.is_operator_supported(op)
    op = create_mean([1, 65, 64, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)


def test_mean_hw_product_int8():
    op = create_mean([1, 16, 16, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert support.is_operator_supported(op)
    op = create_mean([1, 16, 17, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)

    # Create an op that will not saturate the accumulator
    op = create_mean([1, 5, 14, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    op.ifm.quantization.scale_f32 = 2.0
    op.ifm.quantization.zero_point = 95
    op.ofm.quantization.scale_f32 = 1.0
    op.ofm.quantization.zero_point = 95
    assert support.is_operator_supported(op)

    # Create an op that can saturate the accumulator
    op = create_mean([1, 6, 14, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    op.ifm.quantization.scale_f32 = 2.0
    op.ifm.quantization.zero_point = 95
    op.ofm.quantization.scale_f32 = 1.0
    op.ofm.quantization.zero_point = 95
    assert not support.is_operator_supported(op)


def test_mean_hw_product_avgpool():
    op = create_mean([1, 200, 200, 16], [1, 16], [1, 2], DataType.uint8, {"keep_dims": False})
    assert support.is_operator_supported(op)
    op = create_mean([1, 200, 200, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)