# SPDX-FileCopyrightText: Copyright 2020-2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for tflite support_operators
import numpy as np
import pytest

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import ActivationFunction
from ethosu.vela.operation import Op
from ethosu.vela.operation import Padding
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil
from ethosu.vela.tflite_supported_operators import TFLiteSupportedOperators

support = TFLiteSupportedOperators()


def test_constraint_tens_dtype():
    # Tensors can only be of type uint8, int8, int16 and int32
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)
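    # Complementary positive case: a sketch assuming int8 (one of the types
    # listed above) is accepted for Relu when all other constraints hold.
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    assert support.is_operator_supported(op)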


def test_constraint_tens_int32_ops():
    # For int32, only select op types are allowed:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_dimension():
    # Tensor dimensions must lie in the inclusive range 1-65535
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)
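    # Complementary positive sketch, assuming an in-range boundary dimension
    # such as 65535 is accepted when all other constraints hold:
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 1, 1, 65535], [1, 1, 1, 65535])
    assert support.is_operator_supported(op)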


def test_constraint_tens_quant_per_axis_not_supp():
    # Quantization scale cannot be array-valued for elemwise ops
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_is_supp():
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[3]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op.bias.quantization = qp
    assert support.is_operator_supported(op)


def test_constraint_fc_output_2d_is_supp():
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [4, 8, 8, 4], [32, 32], weights_shape=[4, 8, 8, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1024], [16, 64], weights_shape=[1, 1024])
    assert support.is_operator_supported(op)


def test_constraint_faf():
    # Fused activation functions, if set, must be a valid op type
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = ActivationFunction(Op.Conv2D)
    assert not support.is_operator_supported(op)


def test_constraint_faf_ofm_dtype():
    # If a fused activation function is present, the OFM must be 8 or 16 bit
    shp = [1, 8, 8, 8]
    for dtype in [DataType.int8, DataType.uint8, DataType.int16, DataType.int32]:
        op = testutil.create_elemwise_op(Op.Add, "op", shp, shp, shp, datatype=dtype)
        op.activation = ActivationFunction(Op.Relu)
        expected = dtype.size_in_bytes() <= 2
        assert support.is_operator_supported(op) == expected, f"Data type: {dtype}"


def test_constraint_conv_pass():
    # First check that a simple conv passes
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)


@pytest.mark.parametrize(
    "stride_w, stride_h, supported",
    [
        [0, 20, False],
        [4, 1, True],
        [4, 2, True],
        [2, 2, True],
        [4, 4, False],
        [4, 5, False],
        [5, 4, False],
        [3, 3, True],
        [1, 1, True],
        [2, 4, False],
    ],
)
def test_constraint_stride_range(stride_w: int, stride_h: int, supported: bool):
    # Stride width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], [1, 1, 1, 1])
    op.attrs = {"stride_w": stride_w, "stride_h": stride_h}
    assert support.is_operator_supported(op) == supported


def test_constraint_dilated_height_range():
    # Dilated kernel height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_product_range():
    # Dilated kernel width x height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)
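    # Boundary sketch, assuming the limits are kernel height <= 64 and
    # kernel W x H <= 64 * 64, so a 64x64 kernel should still be accepted:
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)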


def test_constraint_weights_type():
    # Weight tensor must be 8-bit
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_const():
    # Weight tensors must be constant
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(weights)
    assert not support.is_operator_supported(op)


def test_constraint_weights_limit():
    # Sum of weights has a limit
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)


def test_constraint_bias_type():
    # Bias must have a certain datatype
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_bias_40bit():
    # Bias values must not exceed 40 bits
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    bias.values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)
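    # Complementary positive sketch, assuming a small bias value that clearly
    # fits within 40 bits is accepted:
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    bias.values = np.array([0xFF])
    op.add_input_tensor(bias)
    assert support.is_operator_supported(op)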


def test_constraint_batch_size():
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)
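    # Batch 1 counterpart, grounded in the parametrized stride test above,
    # where this same op with batch 1 and stride 1, 1 is asserted supported:
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)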


def test_constraint_depth_multiplier():
    # Valid. Depth multiplier is 1 so no further constraints
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # Invalid. Depth multiplier doesn't equal the OFM channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Valid. Depth multiplier is equal to the OFM channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)


def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_same():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_valid():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_filter_range():
    # Avg pool restrictions are dependent on padding:
    # SAME padding restricts both W and H to max 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": Padding.SAME}
    assert not support.is_operator_supported(op)
    # VALID padding limits are much larger
    op.attrs["padding"] = Padding.VALID
    assert support.is_operator_supported(op)


def test_constraint_filter_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter height to max 256
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts filter height to max 256
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_resize():
    for resize_op in Op.op_set(Op.is_resize_op):
        # IFM W and H == 1
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 1, 1, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8]))
        assert support.is_operator_supported(op)

        # IFM == OFM
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 8, 8, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8]))
        assert support.is_operator_supported(op)

        # IFM x2 == OFM ; align_corners = False
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8]))
        assert support.is_operator_supported(op)

        # IFM x4 == OFM ; align_corners = False
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 16, 16, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [16, 16]))
        assert support.is_operator_supported(op)

        # IFM x8 == OFM ; align_corners = False
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 32, 32, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [32, 32]))
        assert support.is_operator_supported(op)

        # (IFM - 1) x2 == OFM - 1 ; align_corners = True
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 7, 7, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7]))
        op.attrs["align_corners"] = True
        assert support.is_operator_supported(op)

        # (IFM - 1) x4 == OFM - 1 ; align_corners = True
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 13, 13, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [13, 13]))
        op.attrs["align_corners"] = True
        assert support.is_operator_supported(op)

        # (IFM - 1) x8 == OFM - 1 ; align_corners = True
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 25, 25, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [25, 25]))
        op.attrs["align_corners"] = True
        assert support.is_operator_supported(op)

        # Invalid case - upscale size
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 17, 17, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [17, 17]))
        assert not support.is_operator_supported(op)

        # Invalid case - upscale size with align corners
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 15, 15, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [15, 15]))
        op.attrs["align_corners"] = True
        assert not support.is_operator_supported(op)


def test_constraint_resize_size():
    for resize_op in Op.op_set(Op.is_resize_op):
        # Invalid case - size != ofm size
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7]))
        assert not support.is_operator_supported(op)


def test_constraint_resize_attrs():
    for resize_op in Op.op_set(Op.is_resize_op):
        # Invalid case - both align corners and half-pixel centers
        op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
        op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8]))
        op.attrs["align_corners"] = True
        op.attrs["half_pixel_centers"] = True
        assert not support.is_operator_supported(op)


def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)


def create_pad_op(
    in_shape,
    out_shape,
    padding,
    in_dtype=DataType.int8,
    out_dtype=DataType.int8,
    pad_dtype=DataType.int32,
):
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, in_dtype, "in")
    in0.quantization = qp
    shape = [] if padding == [] else list(np.shape(padding))
    pad_tensor = create_const_tensor(name="pad", shape=shape, values=padding, dtype=pad_dtype)
    out = Tensor(out_shape, out_dtype, "out")
    out.quantization = qp.clone()
    op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
    return op


def test_constraint_padded_dimensions():
    # Incorrect padding dimensions; only width and height can be padded
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [1, 1], [0, 0]],
    )
    assert not support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 0]],
    )
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 1]],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_shape():
    # The padding tensor of the PAD operator must have shape (3, 2) or (4, 2)
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [0, 0]])
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_none():
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_dtype():
    # PAD operator dtype should be int32 or int64
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
        pad_dtype=DataType.int16,
    )
    assert not support.is_operator_supported(op)
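    # Complementary sketch (an assumption: int64 pad values with a valid
    # (4, 2) padding shape should be accepted, per the comment above):
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
        pad_dtype=DataType.int64,
    )
    assert support.is_operator_supported(op)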


def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, DataType.uint8, "in")
    in0.quantization = qp
    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
    out = Tensor(out_shape, DataType.uint8, "out")
    out.quantization = qp
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)


def create_strided_slice():
    # Creates a valid strided slice operator with some valid inputs/outputs
    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
    op.attrs["begin_mask"] = 1
    op.attrs["end_mask"] = 9
    assert support.is_operator_supported(op)
    return op


def test_constraint_stridedslice_stride_values():
    # Unsupported strides
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(op)
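    # Restoring unit strides makes the op supported again, as asserted in
    # create_strided_slice() above:
    op.inputs[3].values = [1, 1, 1, 1]
    assert support.is_operator_supported(op)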


def test_constraint_inputs_int32():
    # Both inputs must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_output_int32():
    # Output must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_matching_quantization_parameters():
    qp = QuantizationParameters()
    qp.scale_f32 = np.float32(1.5)
    qp.zero_point = 128
    # valid - all matching (uses default quant params)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # invalid - ifm mismatches ofm
    op.ifm.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - ifm2 mismatches ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - both ifm and ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = qp
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # valid - all matching
    op.ofm.quantization = qp
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], None, [1, 8, 8, 8])
    assert support.is_operator_supported(op)


def test_constraint_elemwise_batch_size():
    # BINARY CASE
    # Batch can be >1 if the tensor is <=3D
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert support.is_operator_supported(op)
    # For >3D tensors, batch must be 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2])
    assert support.is_operator_supported(op)
    # Invalid case
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # Batch can be >1 if the tensor is <=3D
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # For >3D tensors, batch must be 1
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2, 2], None, [1, 2, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # Invalid case
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2, 2], None, [2, 2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_broadcast_shapes():
    # BINARY CASE
    # Only allow broadcast to 1 dim, for 1 rank index
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4], [1, 2, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 1, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    # Only allow broadcast to 1 dim, for 3 rank indexes
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 1, 1], [1, 4, 8, 16], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 8, 16], [1, 1, 1, 1], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    # One broadcast dim not 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 4, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 4], [1, 2, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    # OFM shape dim must match the largest ifm/ifm2 shape dim
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)


def create_mean(input_shape, output_shape, axis, datatype, attrs):
    ifm = Tensor(input_shape, datatype, "in")
    ifm.quantization = testutil.default_quant_params()
    ofm = Tensor(output_shape, datatype, "out")
    ofm.quantization = testutil.default_quant_params()
    if type(axis) is list:
        indices = create_const_tensor("indices", [len(axis)], DataType.int32, axis)
    elif type(axis) is int:
        indices = create_const_tensor("indices", [], DataType.int32, axis)
    op = testutil.create_op(Op.Mean, [ifm, indices], ofm, attrs)
    return op


def test_mean_hw_product():
    op = create_mean([1, 64, 64, 16], [1, 16], [1, 2], DataType.uint8, {})
    assert support.is_operator_supported(op)
    op = create_mean([1, 65, 64, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)


def test_mean_hw_product_avgpool():
    op = create_mean([1, 200, 200, 16], [1, 16], [1, 2], DataType.uint8, {"keep_dims": False})
    assert support.is_operator_supported(op)
    op = create_mean([1, 200, 200, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)