blob: 62de0d1d58c03f55167f3260a19050a78ff37578 [file] [log] [blame]
Louis Verhaardfa2f92a2020-09-21 11:56:18 +02001# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
2#
3# SPDX-License-Identifier: Apache-2.0
4#
5# Licensed under the Apache License, Version 2.0 (the License); you may
6# not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an AS IS BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16#
17# Description:
18# Unit tests for support_operators
Michael McGeagh37ded342020-10-01 15:37:44 +010019import numpy as np
20
Louis Verhaardfa2f92a2020-09-21 11:56:18 +020021from ethosu.vela.data_type import DataType
Louis Verhaarde8a5a782020-11-02 18:04:27 +010022from ethosu.vela.operation import ActivationFunction
Louis Verhaardaee5d752020-09-30 09:01:52 +020023from ethosu.vela.operation import Op
Louis Verhaardfa2f92a2020-09-21 11:56:18 +020024from ethosu.vela.supported_operators import SupportedOperators
25from ethosu.vela.tensor import create_const_tensor
Michael McGeagh37ded342020-10-01 15:37:44 +010026from ethosu.vela.tensor import QuantizationParameters
Louis Verhaardfa2f92a2020-09-21 11:56:18 +020027from ethosu.vela.tensor import Tensor
28from ethosu.vela.test import testutil
29
# Single shared checker instance; every test below queries this one object.
support = SupportedOperators()
31
32
def test_constraint_tens_no_dynamic():
    """Dynamic tensors (shapeless, yet not a scalar) must be rejected."""
    relu = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [])
    assert not support.is_operator_supported(relu)
Michael McGeagh37ded342020-10-01 15:37:44 +010037
38
def test_constraint_tens_defined_shape():
    """A shape containing None is not fully defined and must be rejected."""
    relu = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, None, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(relu)
43
44
def test_constraint_tens_output_scalar():
    """A scalar output is never supported."""
    mul = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [])
    mul.ofm.values = 0.5
    assert not support.is_operator_supported(mul)
Michael McGeagh184b2502020-10-09 17:19:52 +010050
51
def test_constraint_tens_input_scalar():
    """Shapeless (scalar) inputs are only allowed for certain op types."""
    # An element-wise op may take a shapeless input
    mul = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8])
    assert support.is_operator_supported(mul)
    # Relu is not an op type that accepts a shapeless input
    relu = testutil.create_op_with_quant_tensors(Op.Relu, [], [1, 8, 8, 8])
    relu.ifm.values = 0.5
    assert not support.is_operator_supported(relu)
60
61
def test_constraint_tens_shape_size():
    """Tensors with more than 4 dimensions must be rejected."""
    relu = testutil.create_op_with_quant_tensors(Op.Relu, [1, 1, 8, 8, 8], [1, 1, 8, 8, 8])
    assert not support.is_operator_supported(relu)
66
67
def test_constraint_tens_dtype():
    """Only uint8, int8, int16 and int32 tensors are allowed; float32 fails."""
    relu = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(relu)
Michael McGeagh184b2502020-10-09 17:19:52 +010072
73
def test_constraint_tens_int32_ops():
    """int32 tensors are only allowed on a select set of op types."""
    # Mul accepts int32
    mul = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(mul)
    # Relu does not
    relu = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(relu)
80
81
def test_constraint_tens_dimension():
    """Each dimension must lie in the inclusive range 1-65535."""
    relu = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(relu)
86
87
def test_constraint_tens_quant_none_check():
    """Every tensor needs quantization parameters; None is rejected."""
    mul = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm2_quant=None)
    assert not support.is_operator_supported(mul)
92
93
def test_constraint_tens_quant_scale():
    """An infinite quantization scale must be rejected."""
    quant = QuantizationParameters()
    quant.zero_point = 0
    quant.scale_f32 = np.inf
    mul = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=quant)
    assert not support.is_operator_supported(mul)
101
102
def test_constraint_faf():
    """A fused activation function must be a valid activation op type."""
    relu = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    relu.activation = ActivationFunction(Op.Conv2D)  # Conv2D is not an activation
    assert not support.is_operator_supported(relu)
Michael McGeagh1f951fc2020-10-14 09:30:02 +0100108
109
def test_constraint_conv_pass():
    """Sanity check: a minimal convolution should be supported."""
    conv = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    conv.attrs = dict(stride_w=1, stride_h=1)
    assert support.is_operator_supported(conv)
115
116
def test_constraint_stride_type():
    """Stride width and height attributes must be integers."""
    conv = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    conv.attrs = dict(stride_w=1.5, stride_h="1")  # float and string are both invalid
    assert not support.is_operator_supported(conv)
122
123
def test_constraint_stride_range():
    """Stride width and height must lie within the supported range."""
    conv = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    conv.attrs = dict(stride_w=0, stride_h=20)  # both out of range
    assert not support.is_operator_supported(conv)
129
130
def test_constraint_dilation_type():
    """Dilation factors must be integers."""
    conv = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    conv.attrs = dict(stride_w=1, stride_h=1, dilation_w_factor=1.5, dilation_h_factor="1")
    assert not support.is_operator_supported(conv)
136
137
def test_constraint_dilation_range():
    """Dilation factors must lie within the supported range."""
    conv = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    conv.attrs = dict(stride_w=1, stride_h=1, dilation_w_factor=0, dilation_h_factor=20)
    assert not support.is_operator_supported(conv)
143
144
def test_constraint_dilated_height_range():
    """The dilated kernel height must lie within the supported range."""
    conv = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    conv.attrs = dict(stride_w=1, stride_h=1)
    assert not support.is_operator_supported(conv)
150
151
def test_constraint_dilated_product_range():
    """The dilated kernel width x height product has an upper bound."""
    conv = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    conv.attrs = dict(stride_w=1, stride_h=1)
    assert not support.is_operator_supported(conv)
157
158
def test_constraint_weights_type():
    """Weight tensors must be 8-bit; int16 weights are rejected."""
    conv = testutil.create_op_with_quant_tensors(
        Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    conv.attrs = dict(stride_w=1, stride_h=1)
    assert not support.is_operator_supported(conv)
166
167
def test_constraint_weights_const():
    """Weights must come from a constant tensor."""
    conv = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    conv.attrs = dict(stride_w=1, stride_h=1)
    # Plain Tensor has no constant values attached -> non-const weights
    non_const_weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    non_const_weights.quantization = testutil.default_quant_params()
    conv.add_input_tensor(non_const_weights)
    assert not support.is_operator_supported(conv)
176
177
def test_constraint_weights_limit():
    """The sum of the weights has an upper limit."""
    conv = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    conv.attrs = dict(stride_w=1, stride_h=1)
    # Push the zero point high enough for the weight sum to exceed the limit
    conv.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(conv)
184
185
def test_constraint_bias_type():
    """Bias tensors are restricted to specific datatypes; uint8 fails."""
    conv = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    conv.attrs = dict(stride_w=1, stride_h=1)
    bad_bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    conv.add_input_tensor(bad_bias)
    assert not support.is_operator_supported(conv)
193
194
def test_constraint_bias_40bit():
    """Bias values must fit in 40 bits; a 41-bit value is rejected."""
    conv = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    conv.attrs = dict(stride_w=1, stride_h=1)
    big_bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    big_bias.quant_values = np.array([0x01FF_FFFF_FFFF])  # needs 41 bits
    conv.add_input_tensor(big_bias)
    assert not support.is_operator_supported(conv)
203
204
def test_constraint_batch_size():
    """Convolutions with batch size greater than 1 are rejected."""
    conv = testutil.create_op_with_quant_tensors(Op.Conv2D, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    conv.attrs = dict(stride_w=1, stride_h=1)
    assert not support.is_operator_supported(conv)
Michael McGeagh65fd9982020-10-20 11:49:28 +0100209
210
def test_constraint_quant_scale_inf():
    """A denormal-small ofm scale must be rejected."""
    relu = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    relu.ofm.quantization.scale_f32 = np.float32(1e-39)
    assert not support.is_operator_supported(relu)
215
216
def test_constraint_depth_multiplier():
    """Depth multiplier must be 1, or equal to the ofm channel count."""
    # depth_multiplier == 1 imposes no further constraints
    dw = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    dw.attrs = dict(stride_w=1, stride_h=1, depth_multiplier=1)
    assert support.is_operator_supported(dw)
    # depth_multiplier != ofm channel count -> rejected
    dw = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    dw.attrs = dict(stride_w=1, stride_h=1, depth_multiplier=2)
    assert not support.is_operator_supported(dw)
    # depth_multiplier == ofm channel count -> ok
    dw = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    dw.attrs = dict(stride_w=1, stride_h=1, depth_multiplier=2)
    assert support.is_operator_supported(dw)
236
237
def test_constraint_tconv_stride():
    """Transpose convolution requires both strides to be 2."""
    tconv = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    tconv.attrs = dict(stride_w=1, stride_h=1, padding=b"SAME")  # stride 1 is invalid
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    tconv.add_input_tensor(ifm)
    assert not support.is_operator_supported(tconv)
246
247
def test_constraint_tconv_same():
    """SAME-padded transpose conv: ofm size must match the strided-up ifm."""

    def make_tconv(ofm_shape):
        # Build a stride-2 SAME-padded transpose conv with a 1x1 ifm
        t_op = testutil.create_op_with_quant_tensors(
            Op.Conv2DBackpropInput, [0], ofm_shape, weights_shape=[1, 1, 1, 1]
        )
        t_op.attrs = dict(stride_w=2, stride_h=2, padding=b"SAME")
        ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
        ifm.quantization = testutil.default_quant_params()
        t_op.add_input_tensor(ifm)
        return t_op

    # 1x1 ifm with stride 2 -> 2x2 ofm is accepted
    assert support.is_operator_supported(make_tconv([1, 2, 2, 1]))
    # 4x4 ofm does not match the 1x1 ifm scaled by the stride
    assert not support.is_operator_supported(make_tconv([1, 4, 4, 1]))
263
264
def test_constraint_tconv_valid():
    """VALID-padded transpose conv: ofm size must match kernel and stride."""

    def make_tconv(weights_shape):
        # Build a stride-2 VALID-padded transpose conv with a 1x1 ifm and 4x4 ofm
        t_op = testutil.create_op_with_quant_tensors(
            Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=weights_shape
        )
        t_op.attrs = dict(stride_w=2, stride_h=2, padding=b"VALID")
        ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
        ifm.quantization = testutil.default_quant_params()
        t_op.add_input_tensor(ifm)
        return t_op

    # 4x4 kernel produces the expected 4x4 ofm
    assert support.is_operator_supported(make_tconv([4, 4, 1, 1]))
    # 2x2 kernel would not yield a 4x4 ofm
    assert not support.is_operator_supported(make_tconv([2, 2, 1, 1]))
280
281
def test_constraint_matching_in_out_types():
    """ifm and ofm datatypes must match for pooling ops."""
    pool = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    pool.attrs = dict(stride_w=2, stride_h=2, filter_width=2, filter_height=2, padding=b"SAME")
    # Both default to uint8 -> supported
    assert support.is_operator_supported(pool)
    # Change only the ifm type -> mismatch, rejected
    pool.ifm.dtype = DataType.int8
    assert not support.is_operator_supported(pool)
290
291
def test_constraint_filter_type():
    """Filter width/height attributes must be integers."""
    pool = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    pool.attrs = dict(stride_w=2, stride_h=2, filter_width=2.5, filter_height="2", padding=b"SAME")
    assert not support.is_operator_supported(pool)
297
298
def test_constraint_filter_range():
    """AvgPool filter limits depend on the padding mode."""
    pool = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    pool.attrs = dict(stride_w=2, stride_h=2, filter_width=20, filter_height=20, padding=b"SAME")
    # SAME padding caps both filter dimensions at 8
    assert not support.is_operator_supported(pool)
    # VALID padding permits much larger filters
    pool.attrs["padding"] = b"VALID"
    assert support.is_operator_supported(pool)
308
309
def test_constraint_filter_height_range_valid_pad():
    """With VALID padding, AvgPool filter height is capped at 256."""
    pool = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    pool.attrs = dict(stride_w=2, stride_h=2, filter_width=2, filter_height=256, padding=b"VALID")
    assert support.is_operator_supported(pool)
    # One past the limit fails
    pool.attrs["filter_height"] = 257
    assert not support.is_operator_supported(pool)
318
319
def test_constraint_filter_product_height_range_valid_pad():
    """With VALID padding, the AvgPool filter W x H product is capped at 256x256."""
    pool = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    pool.attrs = dict(stride_w=2, stride_h=2, filter_width=256, filter_height=256, padding=b"VALID")
    assert support.is_operator_supported(pool)
    # Bumping the width past 256 exceeds the product limit
    pool.attrs["filter_width"] = 257
    assert not support.is_operator_supported(pool)
328
329
def test_constraint_filter_height_range():
    """MaxPool filter height is capped at 256 regardless of padding."""
    pool = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    pool.attrs = dict(stride_w=2, stride_h=2, filter_width=2, filter_height=256, padding=b"SAME")
    assert support.is_operator_supported(pool)
    # One past the limit fails under SAME...
    pool.attrs["filter_height"] = 257
    assert not support.is_operator_supported(pool)
    # ...and still fails under VALID
    pool.attrs["padding"] = b"VALID"
    assert not support.is_operator_supported(pool)
341
342
def test_constraint_filter_product_height_range():
    """MaxPool filter W x H product is capped at 256x256 regardless of padding."""
    pool = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    pool.attrs = dict(stride_w=2, stride_h=2, filter_width=256, filter_height=256, padding=b"SAME")
    assert support.is_operator_supported(pool)
    # Exceeding the product limit fails under SAME...
    pool.attrs["filter_width"] = 257
    assert not support.is_operator_supported(pool)
    # ...and still fails under VALID
    pool.attrs["padding"] = b"VALID"
    assert not support.is_operator_supported(pool)
354
355
def test_constraint_resize():
    """ResizeBilinear is supported only for specific ifm/ofm size relations."""
    # 1x1 ifm is always accepted
    resize = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(resize)
    # Identity resize (ifm == ofm) is accepted
    resize = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(resize)
    # Exact x2 upscale without align_corners is accepted
    resize = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(resize)
    # x2 - 1 upscale with align_corners is accepted
    resize = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
    resize.attrs["align_corners"] = True
    assert support.is_operator_supported(resize)
    # Any other scale factor is rejected, with or without align_corners
    resize = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 20, 20, 8])
    assert not support.is_operator_supported(resize)
    resize.attrs["align_corners"] = True
    assert not support.is_operator_supported(resize)
375
376
def test_constraint_matching_shapes():
    """Softmax requires identical ifm and ofm shapes."""
    softmax = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 2, 2, 4])
    assert not support.is_operator_supported(softmax)
    softmax = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    assert support.is_operator_supported(softmax)
383
384
def test_constraint_beta_value_range():
    """Softmax beta must be non-negative."""
    softmax = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    softmax.attrs["beta"] = -1.0
    assert not support.is_operator_supported(softmax)
    # Zero is the boundary and is allowed
    softmax.attrs["beta"] = 0.0
    assert support.is_operator_supported(softmax)
392
393
def test_constraint_splitv_inferred():
    """SplitV sizes may contain at most one inferred (-1) entry."""
    quant = testutil.default_quant_params()
    # Two -1 entries -> rejected
    splitv = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], np.int16, quantization=quant)
    splitv.add_input_tensor(sizes)
    assert not support.is_operator_supported(splitv)
    # A single -1 entry -> accepted
    splitv = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], np.int16, quantization=quant)
    splitv.add_input_tensor(sizes)
    assert support.is_operator_supported(splitv)
405
406
def test_constraint_concat_pass():
    """Sanity check: a well-formed channel-axis Concat is supported."""
    concat = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    second_input = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    second_input.quantization = testutil.default_quant_params()
    concat.add_input_tensor(second_input)
    concat.attrs["axis"] = 3
    assert support.is_operator_supported(concat)
415
416
def test_constraint_axis_exists():
    """Concat without an axis attribute must be rejected."""
    concat = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    second_input = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    second_input.quantization = testutil.default_quant_params()
    concat.add_input_tensor(second_input)
    # Note: no "axis" attribute is set
    assert not support.is_operator_supported(concat)
424
425
def test_constraint_axis_valid():
    """Concat with an axis outside the tensor rank must be rejected."""
    concat = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    second_input = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    second_input.quantization = testutil.default_quant_params()
    concat.add_input_tensor(second_input)
    concat.attrs["axis"] = 7  # only 4 dimensions exist
    assert not support.is_operator_supported(concat)
434
435
def test_constraint_matching_dimensionality():
    """Concat inputs must all have the same rank; mixing 4D and 2D fails."""
    concat = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    second_input = Tensor([1, 4], DataType.uint8, "in2")  # 2D vs the 4D first input
    second_input.quantization = testutil.default_quant_params()
    concat.add_input_tensor(second_input)
    concat.attrs["axis"] = 3
    assert not support.is_operator_supported(concat)
444
445
def test_constraint_valid_dimensions():
    """Non-concat dimensions must agree between inputs and output."""
    concat = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    # W and H are 2 here, which is not the concat axis and matches neither ifm1 nor ofm
    second_input = Tensor([1, 2, 2, 4], DataType.uint8, "in2")
    second_input.quantization = testutil.default_quant_params()
    concat.add_input_tensor(second_input)
    concat.attrs["axis"] = 3
    assert not support.is_operator_supported(concat)
455
456
def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    """Build a StridedSlice op with constant begin/end inputs and all-ones strides."""
    quant = testutil.default_quant_params()
    ifm = Tensor(in_shape, DataType.uint8, "in")
    ifm.quantization = quant
    begin = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=quant)
    end = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=quant)
    strides = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=quant)
    ofm = Tensor(out_shape, DataType.uint8, "out")
    ofm.quantization = quant
    attrs = dict(ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, begin_mask=0, end_mask=0)
    return testutil.create_op(Op.StridedSlice, [ifm, begin, end, strides], ofm, attrs=attrs)
468
469
def create_strided_slice():
    """Return a known-good StridedSlice op used as the baseline in the tests below."""
    ss_op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
    ss_op.attrs["begin_mask"] = 1
    ss_op.attrs["end_mask"] = 9
    # Baseline must be supported before a test mutates it
    assert support.is_operator_supported(ss_op)
    return ss_op
477
478
def test_constraint_stridedslice_input_count():
    """StridedSlice with an extra input tensor must be rejected."""
    ss_op = create_strided_slice()
    ss_op.add_input_tensor(ss_op.inputs[0].clone())  # now 5 inputs instead of 4
    assert not support.is_operator_supported(ss_op)
484
485
def test_constraint_stridedslice_inputs_const():
    """begin (1), end (2) and strides (3) must all have constant values."""
    for input_idx in (1, 2, 3):
        ss_op = create_strided_slice()
        ss_op.inputs[input_idx].values = None
        assert not support.is_operator_supported(ss_op)
497
498
def test_constraint_stridedslice_stride_values():
    """Only a stride of 1 is supported in every dimension."""
    ss_op = create_strided_slice()
    ss_op.inputs[3].values = [1, 1, 2, 1]  # stride 2 in one dimension
    assert not support.is_operator_supported(ss_op)
504
505
def test_constraint_ellipsis_mask():
    """A non-zero ellipsis_mask is not supported."""
    ss_op = create_strided_slice()
    ss_op.attrs["ellipsis_mask"] = 1
    assert not support.is_operator_supported(ss_op)
511
512
def test_constraint_axis_masks():
    """new_axis_mask and shrink_axis_mask may not both be non-zero."""
    # Either one on its own is fine
    ss_op = create_strided_slice()
    ss_op.attrs["new_axis_mask"] = 2
    assert support.is_operator_supported(ss_op)
    ss_op = create_strided_slice()
    ss_op.attrs["shrink_axis_mask"] = 3
    assert support.is_operator_supported(ss_op)
    # Setting both at once is rejected
    ss_op.attrs["new_axis_mask"] = 2
    assert not support.is_operator_supported(ss_op)
524
525
def test_constraint_slice_ranges():
    """Slices where the end offset does not exceed the begin offset are rejected."""
    # Begin moved past end
    ss_op = create_strided_slice()
    ss_op.inputs[1].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(ss_op)
    # End moved before begin
    ss_op = create_strided_slice()
    ss_op.inputs[2].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(ss_op)
    # Clearing begin_mask exposes an invalid range
    ss_op = create_strided_slice()
    ss_op.attrs["begin_mask"] = 0
    assert not support.is_operator_supported(ss_op)
    # Clearing end_mask likewise
    ss_op = create_strided_slice()
    ss_op.attrs["end_mask"] = 0
    assert not support.is_operator_supported(ss_op)
540
541
def test_constraint_matching_inputs_types():
    """Both element-wise inputs must share a datatype (default uint8)."""
    mul = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    mul.ifm2.dtype = DataType.int8  # now differs from ifm
    assert not support.is_operator_supported(mul)
547
548
def test_constraint_matching_signed():
    """Signed inputs require a signed output; int8 in + uint8 out fails."""
    mul = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    mul.ofm.dtype = DataType.uint8
    assert not support.is_operator_supported(mul)
554
555
def test_constraint_unsigned_valid():
    """Unsigned inputs allow an ofm of the same type, or int32."""
    mul = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    # Same unsigned type (default uint8): accepted
    assert support.is_operator_supported(mul)
    # Other signed widths: rejected
    mul.ofm.dtype = DataType.int8
    assert not support.is_operator_supported(mul)
    mul.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(mul)
    # int32 output is explicitly allowed
    mul.ofm.dtype = DataType.int32
    assert support.is_operator_supported(mul)
568
569
def test_constraint_inputs_int32():
    """SHL requires both inputs to be int32."""
    shl = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(shl)  # default uint8 inputs
    shl = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(shl)
    # Downgrade one input -> rejected
    shl.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(shl)
578
579
def test_constraint_output_int32():
    """SHL requires an int32 output."""
    shl = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(shl)
    shl.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(shl)
586
587
def test_constraint_matching_quantization_parameters():
    """For Minimum, the input quantization must match the output quantization."""
    mismatch_qp = QuantizationParameters()
    mismatch_qp.scale_f32 = np.float32(1.5)
    mismatch_qp.zero_point = 128
    # All tensors share the default quant params -> supported
    minimum = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(minimum)
    # ifm differs from ofm -> rejected
    minimum.ifm.quantization = mismatch_qp
    assert not support.is_operator_supported(minimum)
    # ifm2 differs from ofm -> rejected
    minimum = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    minimum.ifm2.quantization = mismatch_qp
    assert not support.is_operator_supported(minimum)
    # Both inputs differ from ofm -> rejected
    minimum = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    minimum.ifm.quantization = mismatch_qp
    minimum.ifm2.quantization = mismatch_qp
    assert not support.is_operator_supported(minimum)
    # Once the ofm matches again, everything agrees -> supported
    minimum.ofm.quantization = mismatch_qp
    assert support.is_operator_supported(minimum)
610
611
def test_constraint_elemwise_batch_size():
    """Element-wise ops allow batch > 1 only for tensors of 2 or fewer dims."""
    # Binary op: 2D with batch 2 is allowed
    add = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [2, 2], [2, 2])
    assert support.is_operator_supported(add)
    # 3D with batch 1 is allowed
    add = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2], [1, 2, 2], [1, 2, 2])
    assert support.is_operator_supported(add)
    # 3D with batch 2 is rejected
    add = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert not support.is_operator_supported(add)

    # Unary op follows the same rule
    clz = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(clz)
    clz = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2], None, [1, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(clz)
    clz = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(clz)
634
635
def test_constraint_matching_either_shapes():
    """At least one input shape must equal the output shape."""
    # Binary op: either ifm may be the one that matches the ofm
    add = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [4, 4], [2, 2])
    assert support.is_operator_supported(add)
    add = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [2, 2], [2, 2])
    assert support.is_operator_supported(add)
    # Neither input matches -> rejected
    add = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [4, 4], [2, 2])
    assert not support.is_operator_supported(add)

    # Unary op: its single input must match the ofm shape
    clz = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(clz)
    clz = testutil.create_elemwise_op(Op.CLZ, "op", [4, 4], None, [2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(clz)
652
653
def test_constraint_alpha_valid():
    """LeakyRelu alpha must not be negative; zero is allowed."""
    leaky = testutil.create_elemwise_op(Op.LeakyRelu, "op", [2, 2], None, [2, 2])
    leaky.attrs["alpha"] = 0
    assert support.is_operator_supported(leaky)
    leaky.attrs["alpha"] = -1
    assert not support.is_operator_supported(leaky)