# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for tflite_supported_operators
import numpy as np

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import ActivationFunction
from ethosu.vela.operation import Op
from ethosu.vela.operation import Padding
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil
from ethosu.vela.tflite_supported_operators import TFLiteSupportedOperators

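# Each test below builds a small op with the testutil helpers and asserts whether
# this shared TFLiteSupportedOperators instance accepts or rejects it.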
support = TFLiteSupportedOperators()


def test_constraint_tens_dtype():
    # Tensors can only be of type uint8, int8, int16 and int32
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)


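def test_constraint_tens_dtype_unsupported_types():
    # A minimal sketch covering more rejected types, mirroring the loop style of
    # test_constraint_faf_ofm_dtype below; int64 is assumed to be rejected for
    # non-bias tensors (it is only used for constant bias data in these tests)
    for dtype in [DataType.float32, DataType.int64]:
        op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=dtype)
        assert not support.is_operator_supported(op), f"Data type: {dtype}"

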
def test_constraint_tens_int32_ops():
    # For int32, only select op types are allowed:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_dimension():
    # Tensor dimensions must be in the inclusive range 1-65535
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_not_supp():
    # Quantization scale cannot be array-valued for elemwise ops
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_is_supp():
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op.bias.quantization = qp
    assert support.is_operator_supported(op)


def test_constraint_fc_output_2d_is_supp():
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [4, 8, 8, 4], [32, 32], weights_shape=[4, 8, 8, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1024], [16, 64], weights_shape=[1, 1024])
    assert support.is_operator_supported(op)


def test_constraint_faf():
    # Fused activation functions, if set, must be a valid op type
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = ActivationFunction(Op.Conv2D)
    assert not support.is_operator_supported(op)


def test_constraint_faf_ofm_dtype():
    # If fused activation function is present, OFM must be 8 or 16 bit
    shp = [1, 8, 8, 8]
    for dtype in [DataType.int8, DataType.uint8, DataType.int16, DataType.int32]:
        op = testutil.create_elemwise_op(Op.Add, "op", shp, shp, shp, datatype=dtype)
        op.activation = ActivationFunction(Op.Relu)
        expected = dtype.size_in_bytes() <= 2
        assert support.is_operator_supported(op) == expected, f"Data type: {dtype}"


def test_constraint_conv_pass():
    # First test a simple conv passes
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)


def test_constraint_stride_range():
    # Stride width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 0, "stride_h": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_range():
    # Dilation width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 0, "dilation_h_factor": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_height_range():
    # Dilated kernel height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_product_range():
    # Dilated kernel width x height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_type():
    # Weight tensor must be 8-bit
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_const():
    # Weight tensors cannot be non-constant
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(weights)
    assert not support.is_operator_supported(op)


def test_constraint_weights_limit():
    # Sum of weights has a limit
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
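    # A zero point of (127 * 65536) + 1 is assumed to push the zero-point-adjusted
    # weight sum just past the checker's limit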
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)


def test_constraint_bias_type():
    # Bias must have a certain datatype
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_bias_40bit():
    # Bias must not exceed 40-bit
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
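    # 0x01FF_FFFF_FFFF == 2**41 - 1, which does not fit in 40 bits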
    bias.values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_batch_size():
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_depth_multiplier():
    # Valid. Depth multiplier is 1 so no further constraints
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # Invalid. Depth multiplier doesn't equal ofm channel
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Valid. Depth multiplier is equal to ofm channel
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)


def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_same():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_valid():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_filter_range():
    # Avg pool restrictions are dependent on padding:
    # SAME padding restricts both W and H to max 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": Padding.SAME}
    assert not support.is_operator_supported(op)
    # VALID padding limits are much larger
    op.attrs["padding"] = Padding.VALID
    assert support.is_operator_supported(op)


def test_constraint_filter_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts to 256 in filter height
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts to 256 in filter height
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_bilinear_resize():
    # IFM W and H == 1
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
    assert support.is_operator_supported(op)

    # IFM == OFM
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
    assert support.is_operator_supported(op)

    # IFM x2 == OFM ; align_corners = False
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
    assert support.is_operator_supported(op)

    # IFM x4 == OFM ; align_corners = False
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 16, 16, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [16, 16], np.int32))
    assert support.is_operator_supported(op)

    # IFM x8 == OFM ; align_corners = False
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 32, 32, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [32, 32], np.int32))
    assert support.is_operator_supported(op)

    # IFM -1 x2 == OFM -1 ; align_corners = True
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
    op.attrs["align_corners"] = True
    assert support.is_operator_supported(op)

    # IFM -1 x4 == OFM -1 ; align_corners = True
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 13, 13, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [13, 13], np.int32))
    op.attrs["align_corners"] = True
    assert support.is_operator_supported(op)

    # IFM -1 x8 == OFM -1 ; align_corners = True
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 25, 25, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [25, 25], np.int32))
    op.attrs["align_corners"] = True
    assert support.is_operator_supported(op)

    # Invalid case - upscale size
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 17, 17, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [17, 17], np.int32))
    assert not support.is_operator_supported(op)

    # Invalid case - upscale size with align corners
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 15, 15, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [15, 15], np.int32))
    op.attrs["align_corners"] = True
    assert not support.is_operator_supported(op)


def test_constraint_bilinear_resize_size():
    # Invalid case - size != ofm size
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
    assert not support.is_operator_supported(op)


def test_constraint_bilinear_resize_attrs():
    # Invalid case - both align corners and half-pixel centers
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
    op.attrs["align_corners"] = True
    op.attrs["half_pixel_centers"] = True
    assert not support.is_operator_supported(op)


def test_constraint_bilinear_resize_hpc():
    # Invalid case - half-pixel centers (not supported)
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
    op.attrs["half_pixel_centers"] = True
    assert not support.is_operator_supported(op)


def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)


def create_pad_op(
    in_shape,
    out_shape,
    padding,
    in_dtype=DataType.int8,
    out_dtype=DataType.int8,
    pad_dtype=DataType.int32,
):
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, in_dtype, "in")
    in0.quantization = qp
    pad_tensor = create_const_tensor(name="pad", shape=list(np.shape(padding)), values=padding, dtype=pad_dtype)
    out = Tensor(out_shape, out_dtype, "out")
    out.quantization = qp.clone()
    op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
    return op


def test_constraint_padded_dimensions():
    # Incorrect padding dimensions, can only pad width and height
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [1, 1], [0, 0]],
    )
    assert not support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 0]],
    )
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 1]],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_shape():
    # PAD operator must be of shape (3,2) or (4,2)
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [0, 0]])
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_none():
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_dtype():
    # PAD operator dtype should be int32 or int64
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
        pad_dtype=DataType.int16,
    )
    assert not support.is_operator_supported(op)


def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, DataType.uint8, "in")
    in0.quantization = qp
    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
    out = Tensor(out_shape, DataType.uint8, "out")
    out.quantization = qp
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)


def create_strided_slice():
    # Creates a valid strided slice operator with some valid inputs/outputs
    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
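    # TFLite mask semantics: a set bit means the begin/end value for that axis is
    # ignored and the widest possible range is used instead (bit 0 of begin_mask,
    # bits 0 and 3 of end_mask here)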
    op.attrs["begin_mask"] = 1
    op.attrs["end_mask"] = 9
    assert support.is_operator_supported(op)
    return op


def test_constraint_stridedslice_stride_values():
    # Unsupported strides
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(op)


def test_constraint_inputs_int32():
    # Both inputs must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_output_int32():
    # Output must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_matching_quantization_parameters():
    qp = QuantizationParameters()
    qp.scale_f32 = np.float32(1.5)
    qp.zero_point = 128
    # valid - all matching (uses default quant params)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # invalid - ifm mismatch ofm
    op.ifm.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - both ifm and ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = qp
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # valid - all matching
    op.ofm.quantization = qp
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], None, [1, 8, 8, 8])
    assert support.is_operator_supported(op)


def test_constraint_elemwise_batch_size():
    # BINARY CASE
    # Batch can be >1 if dims is <=2D
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [2, 2], [2, 2])
    assert support.is_operator_supported(op)
    # For dims >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2], [1, 2, 2], [1, 2, 2])
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # Batch can be >1 if dims is <=2D
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # For dims >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2], None, [1, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_broadcast_shapes():
    # BINARY CASE
    # Only allow broadcast to 1 dim, for 1 rank index
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4], [1, 2, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 1, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    # Only allow broadcast to 1 dim, for 3 rank indexes
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 1, 1], [1, 4, 8, 16], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 8, 16], [1, 1, 1, 1], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    # One broadcast dim not 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 4, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 4], [1, 2, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    # OFM shape dim < largest ifm/ifm2 shape dim
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)


def create_mean(input_shape, output_shape, axis, datatype, attrs):
    ifm = Tensor(input_shape, datatype, "in")
    ifm.quantization = testutil.default_quant_params()
    ofm = Tensor(output_shape, datatype, "out")
    ofm.quantization = testutil.default_quant_params()
    if type(axis) is list:
        indices = create_const_tensor("indices", [len(axis)], DataType.int32, axis, np.uint8)
    elif type(axis) is int:
        indices = create_const_tensor("indices", [], DataType.int32, axis, np.uint8)
    op = testutil.create_op(Op.Mean, [ifm, indices], ofm, attrs)
    return op


def test_mean_hw_product():
    op = create_mean([1, 64, 64, 16], [1, 16], [1, 2], DataType.uint8, {})
    assert support.is_operator_supported(op)
    op = create_mean([1, 65, 64, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)


def test_mean_hw_product_int8():
    op = create_mean([1, 16, 16, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert support.is_operator_supported(op)
    op = create_mean([1, 16, 17, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)


def test_mean_hw_product_avgpool():
    op = create_mean([1, 200, 200, 16], [1, 16], [1, 2], DataType.uint8, {"keep_dims": False})
    assert support.is_operator_supported(op)
    op = create_mean([1, 200, 200, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)