# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for tflite support_operators
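# Note: these tests use bare asserts and are intended to be collected and run
# with pytest (e.g. `pytest <path-to-this-file>`).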
import numpy as np

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import ActivationFunction
from ethosu.vela.operation import Op
from ethosu.vela.operation import Padding
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil
from ethosu.vela.tflite_supported_operators import TFLiteSupportedOperators

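# Shared constraint checker used by all tests below; is_operator_supported()
# returns False if any constraint for the given operator is violated.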
support = TFLiteSupportedOperators()


def test_constraint_tens_dtype():
    # Tensors can only be of type uint8, int8, int16 and int32
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_int32_ops():
    # For int32, only select op types are allowed:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_dimension():
    # Tensor dimensions can only have values in the inclusive range 1-65535
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_not_supp():
    # Quantization scale cannot be array-valued for elemwise ops
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_is_supp():
    # Per-axis quantization is supported for the bias tensor of convolutions
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op.bias.quantization = qp
    assert support.is_operator_supported(op)


def test_constraint_fc_output_2d_is_supp():
    # FullyConnected with a 2D OFM is supported
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [4, 8, 8, 4], [32, 32], weights_shape=[4, 8, 8, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1024], [16, 64], weights_shape=[1, 1024])
    assert support.is_operator_supported(op)


def test_constraint_faf():
    # Fused activation functions, if set, must be a valid op type
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = ActivationFunction(Op.Conv2D)
    assert not support.is_operator_supported(op)


def test_constraint_faf_ofm_dtype():
    # If fused activation function is present, OFM must be 8 or 16 bit
    shp = [1, 8, 8, 8]
    for dtype in [DataType.int8, DataType.uint8, DataType.int16, DataType.int32]:
        op = testutil.create_elemwise_op(Op.Add, "op", shp, shp, shp, datatype=dtype)
        op.activation = ActivationFunction(Op.Relu)
        expected = dtype.size_in_bytes() <= 2
        assert support.is_operator_supported(op) == expected, f"Data type: {dtype}"


def test_constraint_conv_pass():
    # First, check that a simple conv passes
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)


def test_constraint_stride_range():
    # Stride width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 0, "stride_h": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_range():
    # Dilation width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 0, "dilation_h_factor": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_height_range():
    # Dilated kernel height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_product_range():
    # Dilated kernel width x height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_type():
    # Weight tensor must be 8-bit
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_const():
    # Weight tensor must be constant
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(weights)
    assert not support.is_operator_supported(op)


def test_constraint_weights_limit():
    # The zero-point corrected sum of the weights has a limit; an extreme
    # zero point pushes it over
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)


def test_constraint_bias_type():
    # Bias must be of type int32 or int64
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_bias_40bit():
    # Bias values must fit in 40 bits; 0x01FF_FFFF_FFFF needs 41
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    bias.values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_batch_size():
    # IFM batch size must be 1
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_depth_multiplier():
    # Valid. Depth multiplier is 1 so no further constraints
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # Invalid. Depth multiplier doesn't equal the OFM channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Valid. Depth multiplier is equal to the OFM channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)


def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_same():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_valid():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_filter_range():
    # Avg pool restrictions are dependent on padding:
    # SAME padding restricts both W and H to max 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": Padding.SAME}
    assert not support.is_operator_supported(op)
    # VALID padding limits are much larger
    op.attrs["padding"] = Padding.VALID
    assert support.is_operator_supported(op)


def test_constraint_filter_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter height to max 256
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Filter height is restricted to max 256
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Filter W x H is restricted to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_resize():
    # IFM W and H == 1
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM == OFM
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 == OFM ; align_corners = False
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 -1 == OFM ; align_corners = True
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
    op.attrs["align_corners"] = True
    assert support.is_operator_supported(op)
    # Invalid cases
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 20, 20, 8])
    assert not support.is_operator_supported(op)
    op.attrs["align_corners"] = True
    assert not support.is_operator_supported(op)


def test_constraint_bilinear_resize_attrs():
    # half_pixel_centers is not supported
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    op.attrs["half_pixel_centers"] = True
    assert not support.is_operator_supported(op)


def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)


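# Helper: builds a PAD operator (input tensor, constant paddings tensor,
# output tensor) with default quantization, so individual PAD constraints
# can be probed in the tests below.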
def create_pad_op(
    in_shape,
    out_shape,
    padding,
    in_dtype=DataType.int8,
    out_dtype=DataType.int8,
    pad_dtype=DataType.int32,
):
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, in_dtype, "in")
    in0.quantization = qp
    pad_tensor = create_const_tensor(name="pad", shape=list(np.shape(padding)), values=padding, dtype=pad_dtype)
    out = Tensor(out_shape, out_dtype, "out")
    out.quantization = qp.clone()
    op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
    return op


def test_constraint_padded_dimensions():
    # Incorrect padding dimensions, can only pad width and height
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [1, 1], [0, 0]],
    )
    assert not support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 0]],
    )
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[1, 1], [1, 1], [0, 1]],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_shape():
    # The paddings tensor must have shape (3, 2) or (4, 2)
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [0, 0]])
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_none():
    # The paddings tensor must not be empty
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[],
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_dtype():
    # The paddings tensor dtype must be int32 or int64
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
        pad_dtype=DataType.int16,
    )
    assert not support.is_operator_supported(op)


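# Helper: builds a StridedSlice operator with constant begin/end/strides
# input tensors and all mask attributes cleared.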
def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, DataType.uint8, "in")
    in0.quantization = qp
    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
    out = Tensor(out_shape, DataType.uint8, "out")
    out.quantization = qp
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)


def create_strided_slice():
    # Creates a valid strided slice operator with some valid inputs/outputs
    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
    op.attrs["begin_mask"] = 1
    op.attrs["end_mask"] = 9
    assert support.is_operator_supported(op)
    return op


def test_constraint_stridedslice_stride_values():
    # Unsupported strides
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(op)


def test_constraint_inputs_int32():
    # Both inputs must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_output_int32():
    # The output must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_matching_quantization_parameters():
    qp = QuantizationParameters()
    qp.scale_f32 = np.float32(1.5)
    qp.zero_point = 128
    # valid - all matching (uses default quant params)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # invalid - ifm mismatches ofm
    op.ifm.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - ifm2 mismatches ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - both ifm and ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = qp
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # valid - all matching
    op.ofm.quantization = qp
    assert support.is_operator_supported(op)
    # valid - no ifm2
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], None, [1, 8, 8, 8])
    assert support.is_operator_supported(op)


def test_constraint_elemwise_batch_size():
    # BINARY CASE
    # Batch can be >1 if the tensors are <= 2D
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [2, 2], [2, 2])
    assert support.is_operator_supported(op)
    # For tensors > 2D, batch must be 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2], [1, 2, 2], [1, 2, 2])
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # Batch can be >1 if the tensors are <= 2D
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # For tensors > 2D, batch must be 1
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2], None, [1, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_broadcast_shapes():
    # BINARY CASE
    # Only allow broadcast to 1 dim, for 1 rank index
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4], [1, 2, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 1, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    # Only allow broadcast to 1 dim, for 3 rank indexes
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 1, 1], [1, 4, 8, 16], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 8, 16], [1, 1, 1, 1], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    # One broadcast dim not 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 4, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 4], [1, 2, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    # OFM shape dim must match the largest ifm/ifm2 shape dim
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [1, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)


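# Helper: builds a MEAN operator; `axis` may be an int or a list of ints and
# is passed as a constant "indices" input tensor.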
def create_mean(input_shape, output_shape, axis, datatype, attrs):
    ifm = Tensor(input_shape, datatype, "in")
    ifm.quantization = testutil.default_quant_params()
    ofm = Tensor(output_shape, datatype, "out")
    ofm.quantization = testutil.default_quant_params()
    if type(axis) is list:
        indices = create_const_tensor("indices", [len(axis)], DataType.int32, axis, np.uint8)
    elif type(axis) is int:
        indices = create_const_tensor("indices", [], DataType.int32, axis, np.uint8)
    op = testutil.create_op(Op.Mean, [ifm, indices], ofm, attrs)
    return op


def test_mean_hw_product():
    # The product of IFM height and width is limited
    op = create_mean([1, 64, 64, 16], [1, 16], [1, 2], DataType.uint8, {})
    assert support.is_operator_supported(op)
    op = create_mean([1, 65, 64, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)


def test_mean_hw_product_int8():
    # For int8 with keep_dims, the H x W product limit is stricter
    op = create_mean([1, 16, 16, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert support.is_operator_supported(op)
    op = create_mean([1, 16, 17, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)


def test_mean_hw_product_avgpool():
    # A mean that maps to an avg pool allows a larger H x W product
    op = create_mean([1, 200, 200, 16], [1, 16], [1, 2], DataType.uint8, {"keep_dims": False})
    assert support.is_operator_supported(op)
    op = create_mean([1, 200, 200, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)