# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for support_operators
import numpy as np

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import ActivationFunction
from ethosu.vela.operation import Op
from ethosu.vela.supported_operators import SupportedOperators
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil

support = SupportedOperators()


def test_constraint_tens_no_dynamic():
    # Tensors cannot be dynamic (no shape, not a scalar)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [])
    assert not support.is_operator_supported(op)


def test_constraint_tens_defined_shape():
    # Tensor shapes cannot contain None
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, None, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)


def test_constraint_tens_output_scalar():
    # Scalar output is not allowed at all:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [])
    op.ofm.values = 0.5
    assert not support.is_operator_supported(op)


def test_constraint_tens_input_scalar():
    # Shapeless input is allowed if it's of a certain type:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Invalid shapeless input due to op type:
    op = testutil.create_op_with_quant_tensors(Op.Relu, [], [1, 8, 8, 8])
    op.ifm.values = 0.5
    assert not support.is_operator_supported(op)


def test_constraint_tens_shape_size():
    # Tensors cannot be > 4D
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 1, 8, 8, 8], [1, 1, 8, 8, 8])
    assert not support.is_operator_supported(op)


def test_constraint_tens_dtype():
    # Tensors can only be of type uint8, int8, int16 and int32
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_int32_ops():
    # For int32, only select op types are allowed:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_dimension():
    # Tensor dimensions can only be in the inclusive range of 1-65535
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_none_check():
    # Tensors must have quantization parameters
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm2_quant=None)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_scale():
    # Quantization scale cannot be infinite
    qp = QuantizationParameters()
    qp.zero_point = 0
    qp.scale_f32 = np.inf
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_faf():
    # Fused activation functions, if set, must be a valid op type
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = ActivationFunction(Op.Conv2D)
    assert not support.is_operator_supported(op)


def test_constraint_conv_pass():
    # First test a simple conv passes
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)


def test_constraint_stride_type():
    # Stride width and height must be integer types
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1.5, "stride_h": "1"}
    assert not support.is_operator_supported(op)


def test_constraint_stride_range():
    # Stride width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 0, "stride_h": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_type():
    # Dilation width and height must be integer types
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 1.5, "dilation_h_factor": "1"}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_range():
    # Dilation width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 0, "dilation_h_factor": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_height_range():
    # Dilated kernel height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_product_range():
    # Dilated kernel width x height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_type():
    # Weight tensor must be 8-bit
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_const():
    # Weight tensors cannot be non-constant
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(weights)
    assert not support.is_operator_supported(op)


def test_constraint_weights_limit():
    # Sum of weights has a limit
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)


def test_constraint_bias_type():
    # Bias must have a certain datatype
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_bias_40bit():
    # Bias values must not exceed 40 bits
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    bias.quant_values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_batch_size():
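    # Convolution batch size must be 1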
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_quant_scale_inf():
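    # A quantization scale small enough to overflow to infinity when inverted is not supported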
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.ofm.quantization.scale_f32 = np.float32(1e-39)
    assert not support.is_operator_supported(op)


def test_constraint_depth_multiplier():
    # Valid. Depth multiplier is 1 so no further constraints
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # Invalid. Depth multiplier doesn't equal the ofm channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Valid. Depth multiplier is equal to the ofm channel count
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)


def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_same():
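    # With SAME padding, the OFM dims must equal the IFM dims multiplied by the stride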
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"SAME"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_valid():
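    # With VALID padding, the OFM dims must equal (IFM dims - 1) * stride + kernel size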
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_matching_in_out_types():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 2, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # Invalid. Datatypes for ifm and ofm must match (default is uint8)
    op.ifm.dtype = DataType.int8
    assert not support.is_operator_supported(op)


def test_constraint_filter_type():
    # Filter width/height must be integers
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2.5, "filter_height": "2", "padding": b"SAME"}
    assert not support.is_operator_supported(op)


def test_constraint_filter_range():
    # Avg pool restrictions are dependent on padding:
    # SAME padding restricts both W and H to max 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": b"SAME"}
    assert not support.is_operator_supported(op)
    # VALID padding limits are much larger
    op.attrs["padding"] = b"VALID"
    assert support.is_operator_supported(op)


def test_constraint_filter_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": b"VALID"}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter height to max 256
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": b"VALID"}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # Filter height is restricted to max 256
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = b"VALID"
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": b"SAME"}
    assert support.is_operator_supported(op)
    # Filter W x H is restricted to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = b"VALID"
    assert not support.is_operator_supported(op)


def test_constraint_resize():
    # IFM W and H == 1
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM == OFM
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 == OFM ; align_corners = False
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 -1 == OFM ; align_corners = True
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
    op.attrs["align_corners"] = True
    assert support.is_operator_supported(op)
    # Invalid cases
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 20, 20, 8])
    assert not support.is_operator_supported(op)
    op.attrs["align_corners"] = True
    assert not support.is_operator_supported(op)


def test_constraint_matching_shapes():
    # Softmax requires the ifm and ofm shapes to match
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 2, 2, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    assert support.is_operator_supported(op)


def test_constraint_splitv_inferred():
    # SplitV requires a maximum of one inferred shape (-1)
    qp = testutil.default_quant_params()
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], np.int16, quantization=qp)
    op.add_input_tensor(sizes)
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], np.int16, quantization=qp)
    op.add_input_tensor(sizes)
    assert support.is_operator_supported(op)


def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)


def test_constraint_axis_exists():
    # Missing axis attribute
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    assert not support.is_operator_supported(op)


def test_constraint_axis_valid():
    # Invalid axis attribute
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 7
    assert not support.is_operator_supported(op)


def test_constraint_matching_dimensionality():
    # Mismatching dimensionality: 4D+2D=4D
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)


def test_constraint_valid_dimensions():
    # Mismatching dimension value:
    # ifm2 has w and h as 2, which is not the concat axis and doesn't match ifm1 or ofm
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 2, 2, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)


def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
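    # Helper that builds a StridedSlice op with constant begin/end/strides input tensors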
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, DataType.uint8, "in")
    in0.quantization = qp
    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
    out = Tensor(out_shape, DataType.uint8, "out")
    out.quantization = qp
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)


def create_strided_slice():
    # Creates a valid strided slice operator with some valid inputs/outputs
    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
    op.attrs["begin_mask"] = 1
    op.attrs["end_mask"] = 9
    assert support.is_operator_supported(op)
    return op


def test_constraint_stridedslice_input_count():
    # Wrong number of input tensors
    op = create_strided_slice()
    op.add_input_tensor(op.inputs[0].clone())
    assert not support.is_operator_supported(op)


def test_constraint_stridedslice_inputs_const():
    # begin, end, stride values must not be None
    op = create_strided_slice()
    op.inputs[1].values = None
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[2].values = None
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[3].values = None
    assert not support.is_operator_supported(op)


def test_constraint_stridedslice_stride_values():
    # Unsupported strides
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(op)


def test_constraint_ellipsis_mask():
    # Unsupported ellipsis mask
    op = create_strided_slice()
    op.attrs["ellipsis_mask"] = 1
    assert not support.is_operator_supported(op)


def test_constraint_axis_masks():
    op = create_strided_slice()
    # Setting one of new_axis_mask/shrink_axis_mask to non-zero is ok
    op.attrs["new_axis_mask"] = 2
    assert support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["shrink_axis_mask"] = 3
    assert support.is_operator_supported(op)
    # But setting both to non-zero is not supported
    op.attrs["new_axis_mask"] = 2
    assert not support.is_operator_supported(op)


def test_constraint_slice_ranges():
    # Examples where end offset <= begin offset
    op = create_strided_slice()
    op.inputs[1].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[2].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["begin_mask"] = 0
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["end_mask"] = 0
    assert not support.is_operator_supported(op)


def test_constraint_matching_inputs_types():
    # input data types must match (default is uint8)
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.dtype = DataType.int8
    assert not support.is_operator_supported(op)


def test_constraint_matching_signed():
    # signed inputs require output to also be signed
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    op.ofm.dtype = DataType.uint8
    assert not support.is_operator_supported(op)


def test_constraint_unsigned_valid():
    # unsigned inputs require output to be either:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    # the same (default uint8)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int8
    assert not support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)
    # or int32
    op.ofm.dtype = DataType.int32
    assert support.is_operator_supported(op)


def test_constraint_inputs_int32():
    # Both inputs must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_output_int32():
    # The output must be of type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_matching_quantization_parameters():
    qp = QuantizationParameters()
    qp.scale_f32 = np.float32(1.5)
    qp.zero_point = 128
    # valid - all matching (uses default quant params)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # invalid - ifm mismatches ofm
    op.ifm.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - ifm2 mismatches ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # invalid - both ifm and ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = qp
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # valid - all matching
    op.ofm.quantization = qp
    assert support.is_operator_supported(op)


def test_constraint_elemwise_batch_size():
    # BINARY CASE
    # Batch can be >1 if the shape is <=2D
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [2, 2], [2, 2])
    assert support.is_operator_supported(op)
    # For shapes >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2], [1, 2, 2], [1, 2, 2])
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # Batch can be >1 if the shape is <=2D
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # For shapes >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2], None, [1, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_matching_either_shapes():
    # BINARY CASE
    # At least one ifm shape must match ofm's shape
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [4, 4], [2, 2])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [2, 2], [2, 2])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [4, 4], [2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # No second input, so this is treated the same as requiring the ifm shape to match the ofm shape
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.CLZ, "op", [4, 4], None, [2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_alpha_valid():
    # Alpha cannot be negative
    op = testutil.create_elemwise_op(Op.LeakyRelu, "op", [2, 2], None, [2, 2])
    op.attrs["alpha"] = 0
    assert support.is_operator_supported(op)
    op.attrs["alpha"] = -1
    assert not support.is_operator_supported(op)