# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for support_operators
import numpy as np
import pytest

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import ActivationFunction
from ethosu.vela.operation import Op
from ethosu.vela.operation import Padding
from ethosu.vela.supported_operators import SupportedOperators
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import QuantizationParameters
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil

support = SupportedOperators()


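# Every test below follows the same pattern: build a minimal operator with the
# testutil helpers, then ask the shared SupportedOperators instance whether it
# is accepted. The sanity check below is an added illustration of that pattern
# (not part of the original suite); it uses the known-good Relu op that later
# tests deliberately break.
def test_simple_relu_is_supported():
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)

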
def test_constraint_tens_no_dynamic():
    # Tensors cannot be dynamic (no shape, not a scalar)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [])
    assert not support.is_operator_supported(op)


def test_constraint_tens_defined_shape():
    # Tensor shapes cannot contain None
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, None, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)


def test_constraint_tens_output_scalar():
    # Scalar output is not allowed at all:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [])
    op.ofm.values = 0.5
    assert not support.is_operator_supported(op)


def test_constraint_tens_input_scalar():
    # Shapeless input is allowed if it's of a certain type:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Invalid shapeless input due to op type:
    op = testutil.create_op_with_quant_tensors(Op.Relu, [], [1, 8, 8, 8])
    op.ifm.values = 0.5
    assert not support.is_operator_supported(op)


def test_constraint_tens_shape_size():
    # Tensors cannot be > 4D
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 1, 8, 8, 8], [1, 1, 8, 8, 8], set_ifm_ofm_shapes=False)
    assert not support.is_operator_supported(op)


def test_constraint_tens_dtype():
    # Tensors can only be of type uint8, int8, int16 or int32
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.float32)
    assert not support.is_operator_supported(op)


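# Added counterpart sketch (an assumption, not from the original suite): the
# same Relu with one of the listed supported dtypes is expected to pass, since
# the int8 ifm and ofm match and no other constraint appears to be violated.
def test_constraint_tens_dtype_valid():
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    assert support.is_operator_supported(op)

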
def test_constraint_tens_int32_ops():
    # For int32, only select op types are allowed:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_tens_dimension():
    # Tensor dimensions must be in the inclusive range 1-65535
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 0], [1, 8, 8, 65536])
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_none_check():
    # Tensors must have quantization parameters
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm2_quant=None)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_scale():
    # Quantization scale cannot be infinite
    qp = QuantizationParameters()
    qp.zero_point = 0
    qp.scale_f32 = np.inf
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_not_supp():
    # Quantization scale cannot be array-valued for elemwise ops
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
    assert not support.is_operator_supported(op)


def test_constraint_tens_quant_per_axis_is_supp():
    # Per-axis quantization is supported on the bias of conv-type ops
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)
    qp = QuantizationParameters()
    qp.zero_point = np.zeros((1, 3))
    qp.scale_f32 = np.ones((1, 3))
    op.bias.quantization = qp
    assert support.is_operator_supported(op)


def test_constraint_fc_output_2d_not_supp():
    # FullyConnected output shapes other than 2D are not supported
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [12, 1], [3, 2, 2, 1], weights_shape=[12, 1, 1, 1])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [12, 1, 1, 1], [1, 3, 4], weights_shape=[12, 1, 1, 1])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1, 1, 1], [1], weights_shape=[1, 1, 1, 1])
    assert not support.is_operator_supported(op)


def test_constraint_fc_output_2d_is_supp():
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [4, 8, 8, 4], [32, 32], weights_shape=[4, 8, 8, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [1, 1024], [16, 64], weights_shape=[1, 1024])
    assert support.is_operator_supported(op)


def test_constraint_faf():
    # Fused activation functions, if set, must be a valid op type
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.activation = ActivationFunction(Op.Conv2D)
    assert not support.is_operator_supported(op)


def test_constraint_faf_ofm_dtype():
    # If a fused activation function is present, the OFM must be 8-bit or 16-bit
    shp = [1, 8, 8, 8]
    for dtype in [DataType.int8, DataType.uint8, DataType.int16, DataType.int32]:
        op = testutil.create_elemwise_op(Op.Add, "op", shp, shp, shp, datatype=dtype)
        op.activation = ActivationFunction(Op.Relu)
        expected = dtype.size_in_bytes() <= 2
        assert support.is_operator_supported(op) == expected, f"Data type: {dtype}"


def test_constraint_conv_pass():
    # First, test that a simple conv passes
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert support.is_operator_supported(op)


def test_constraint_stride_type():
    # Stride width and height must be integer types
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1.5, "stride_h": "1"}
    assert not support.is_operator_supported(op)


def test_constraint_stride_range():
    # Stride width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 0, "stride_h": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_type():
    # Dilation width and height must be integer types
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 1.5, "dilation_h_factor": "1"}
    assert not support.is_operator_supported(op)


def test_constraint_dilation_range():
    # Dilation width and height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1, "dilation_w_factor": 0, "dilation_h_factor": 20}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_height_range():
    # Dilated kernel height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[65, 64, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_dilated_product_range():
    # Dilated kernel width x height must lie within a certain range
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[64, 65, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_type():
    # Weight tensor must be 8-bit
    op = testutil.create_op_with_quant_tensors(
        Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1], datatype=DataType.int16
    )
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_weights_const():
    # Weight tensors cannot be non-constant
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
    weights.quantization = testutil.default_quant_params()
    op.add_input_tensor(weights)
    assert not support.is_operator_supported(op)


def test_constraint_weights_limit():
    # Sum of weights has a limit
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    op.weights.quantization.zero_point = np.array([[[[(127 * 65536) + 1]]]])
    assert not support.is_operator_supported(op)


def test_constraint_bias_type():
    # Bias must have a certain datatype
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 8, 8, 8], DataType.uint8, "bias")
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_bias_40bit():
    # Bias values must not exceed 40 bits
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
    bias.quant_values = np.array([0x01FF_FFFF_FFFF])
    op.add_input_tensor(bias)
    assert not support.is_operator_supported(op)


def test_constraint_batch_size():
    # Batch size must be 1
    op = testutil.create_op_with_quant_tensors(Op.Conv2D, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1}
    assert not support.is_operator_supported(op)


def test_constraint_quant_scale_inf():
    # Test handling when IFM scale / OFM scale is infinite
    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization.scale_f32 = np.float32(1e9)
    op.ofm.quantization.scale_f32 = np.float32(1e-35)
    assert not support.is_operator_supported(op)


def test_constraint_ofm_scale_too_small():
    # Tests handling of OFM scale < 1e-38
    shp = [1, 10, 20, 16]
    op = testutil.create_elemwise_op(Op.Mul, "mul", shp, shp, shp, ofm_quant=testutil.default_quant_params(),)
    assert support.is_operator_supported(op)
    op.ofm.quantization.scale_f32 = 1e-43
    assert not support.is_operator_supported(op)


def test_constraint_depth_multiplier():
    # Valid. Depth multiplier is 1 so no further constraints
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
    assert support.is_operator_supported(op)
    # Invalid. Depth multiplier doesn't equal ofm channel
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert not support.is_operator_supported(op)
    # Valid. Depth multiplier is equal to ofm channel
    op = testutil.create_op_with_quant_tensors(
        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
    )
    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
    assert support.is_operator_supported(op)


def test_constraint_tconv_stride():
    # Strides must be 2
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_same():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_tconv_valid():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op)
    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.VALID}
    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert not support.is_operator_supported(op)


def test_constraint_matching_in_out_types():
    # Valid
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 2, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Invalid. Datatypes for ifm and ofm must match (default uint8)
    op.ifm.dtype = DataType.int8
    assert not support.is_operator_supported(op)


def test_constraint_filter_type():
    # Filter width/height must be integers
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2.5, "filter_height": "2", "padding": Padding.SAME}
    assert not support.is_operator_supported(op)


def test_constraint_filter_range():
    # Avg pool restrictions are dependent on padding:
    # SAME padding restricts both W and H to max 8
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": Padding.SAME}
    assert not support.is_operator_supported(op)
    # VALID padding limits are much larger
    op.attrs["padding"] = Padding.VALID
    assert support.is_operator_supported(op)


def test_constraint_filter_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts to 256 in filter height
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range_valid_pad():
    # Avg pool restrictions are dependent on padding:
    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.VALID}
    assert support.is_operator_supported(op)
    # VALID padding restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)


def test_constraint_filter_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts to 256 in filter height
    op.attrs["filter_height"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_filter_product_height_range():
    # Max pool restrictions aren't dependent on padding
    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": Padding.SAME}
    assert support.is_operator_supported(op)
    # Restricts filter W x H to 256x256
    op.attrs["filter_width"] = 257
    assert not support.is_operator_supported(op)
    # Doesn't matter if SAME or VALID
    op.attrs["padding"] = Padding.VALID
    assert not support.is_operator_supported(op)


def test_constraint_resize():
    # IFM W and H == 1
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM == OFM
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 == OFM ; align_corners = False
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # IFM x2 -1 == OFM ; align_corners = True
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
    op.attrs["align_corners"] = True
    assert support.is_operator_supported(op)
    # Invalid cases
    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 20, 20, 8])
    assert not support.is_operator_supported(op)
    op.attrs["align_corners"] = True
    assert not support.is_operator_supported(op)


def test_constraint_matching_shapes():
    # Softmax requires the ifm and ofm shapes to match
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 2, 2, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    assert support.is_operator_supported(op)


def test_constraint_beta_value_range():
    # beta cannot be negative (zero is allowed)
    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
    op.attrs["beta"] = -1.0
    assert not support.is_operator_supported(op)
    op.attrs["beta"] = 0.0
    assert support.is_operator_supported(op)


def test_constraint_splitv_inferred():
    # SplitV requires a maximum of one inferred shape (-1)
    qp = testutil.default_quant_params()
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], np.int16, quantization=qp)
    op.add_input_tensor(sizes)
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], np.int16, quantization=qp)
    op.add_input_tensor(sizes)
    assert support.is_operator_supported(op)


def test_constraint_concat_pass():
    # A working concat
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert support.is_operator_supported(op)


def test_constraint_axis_exists():
    # Missing axis attribute
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    assert not support.is_operator_supported(op)


def test_constraint_axis_valid():
    # Invalid axis attribute
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 7
    assert not support.is_operator_supported(op)


def test_constraint_matching_dimensionality():
    # Mismatching dimensionality: 4D+2D=4D
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)


def test_constraint_valid_dimensions():
    # Mismatching dimension value:
    # ifm2 has w and h as 2, which is not the axis to concat and doesn't match ifm1 or ofm
    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
    ifm2 = Tensor([1, 2, 2, 4], DataType.uint8, "in2")
    ifm2.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm2)
    op.attrs["axis"] = 3
    assert not support.is_operator_supported(op)


def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    # Creates a StridedSlice op with unit strides and all masks cleared
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, DataType.uint8, "in")
    in0.quantization = qp
    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
    out = Tensor(out_shape, DataType.uint8, "out")
    out.quantization = qp
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)


def create_pad_op(
    in_shape,
    out_shape,
    padding,
    in_dtype=DataType.int8,
    out_dtype=DataType.int8,
    pad_dtype=DataType.int32,
    pad_setting=Padding.VALID,
    kernel_size=3,
):
    # Creates a PAD op whose output feeds a Conv2DBias consumer, so that
    # consumer-dependent PAD constraints can be exercised
    qp = testutil.default_quant_params()
    in0 = Tensor(in_shape, in_dtype, "in")
    in0.quantization = qp
    pad_tensor = create_const_tensor(name="pad", shape=list(np.shape(padding)), values=padding, dtype=pad_dtype)
    out = Tensor(out_shape, out_dtype, "out")
    out.quantization = qp.clone()
    op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
    conv_out_tens = Tensor(in_shape, in_dtype, "output")
    conv_out_tens.quantization = qp.clone()
    weight_tens = Tensor([kernel_size, kernel_size, in_shape[-1], out_shape[-1]], in_dtype, "weights")
    weight_tens.values = np.zeros(weight_tens.shape)
    weight_tens.quant_values = np.zeros(weight_tens.shape, np.int8)
    weight_tens.quantization = qp.clone()
    bias_tens = Tensor(out_shape, pad_dtype, "biases")
    attrs = {"padding": pad_setting, "stride_w": 2, "stride_h": 2, "dilation_w_factor": 1, "dilation_h_factor": 1}
    attrs["strides"] = (1, attrs["stride_h"], attrs["stride_w"], 1)
    conv2d_op = testutil.create_op(Op.Conv2DBias, [out, weight_tens, bias_tens], conv_out_tens, attrs)
    conv2d_op.add_input_tensor(out)
    return op


def test_constraint_pad_input_count():
    # PAD must have exactly two input tensors
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0]],)
    assert support.is_operator_supported(op)
    op.add_input_tensor(op.inputs[0].clone())
    assert not support.is_operator_supported(op)


def test_constraint_padded_dimensions():
    # Incorrect padding dimensions; only width and height can be padded
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[1, 1], [1, 1], [1, 1], [0, 0]],)
    assert not support.is_operator_supported(op)


def test_constraint_pad_shape():
    # The padding tensor must have shape (4, 2)
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],)
    assert not support.is_operator_supported(op)


def test_constraint_pad_none():
    # The padding tensor must not be empty
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[],)
    assert not support.is_operator_supported(op)


def test_constraint_pad_dtype():
    # The padding tensor dtype must be int32 or int64
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
        pad_dtype=DataType.int16,
    )
    assert not support.is_operator_supported(op)


def test_constraint_pad_consumer():
    # PAD operator must be followed by a valid consumer with Padding.VALID attribute
    op = create_pad_op(in_shape=[1, 1, 1, 1], out_shape=[1, 3, 3, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0]],)
    assert support.is_operator_supported(op)
    op = create_pad_op(
        in_shape=[1, 1, 1, 1],
        out_shape=[1, 3, 3, 1],
        padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
        pad_setting=Padding.SAME,
    )
    assert not support.is_operator_supported(op)
    op_consumer = testutil.create_op_with_quant_tensors(Op.ConcatTFLite, [1, 1, 1, 4], [1, 1, 1, 8])
    op.ofm.consumer_list = [op_consumer]
    assert not support.is_operator_supported(op)
    op_consumer = testutil.create_elemwise_op(Op.Add, "op", [1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1])
    op.ofm.consumer_list = [op_consumer]
    assert not support.is_operator_supported(op)


pad_invalid_size_test_data = [
    (2, 1, 1, 1),
    (1, 2, 1, 1),
    (1, 1, 2, 1),
    (1, 1, 1, 2),
]


@pytest.mark.parametrize("top, left, bottom, right", pad_invalid_size_test_data)
def test_constraint_pad_size(top, left, bottom, right):
    # Tests PAD operator with a padding that is too large to be handled by the NPU
    out_shape = [1, 11 + top + bottom, 11 + left + right, 1]
    padding = [[0, 0], [top, bottom], [left, right], [0, 0]]
    op = create_pad_op(in_shape=[1, 11, 11, 1], out_shape=out_shape, padding=padding,)
    assert not support.is_operator_supported(op)


leading_pad_test_data = [
    (2, 2, 11, True),
    (1, 2, 11, False),
    (2, 1, 11, False),
    (5, 2, 11, True),
]


@pytest.mark.parametrize("top, left, kernel_size, expected", leading_pad_test_data)
def test_constraint_leading_pad_size(top, left, kernel_size, expected):
    # Tests PAD operator with big kernel size; top and left pad must be multiple of stride
    out_shape = [1, 11 + top, 11 + left, 1]
    padding = [[0, 0], [top, 0], [left, 0], [0, 0]]
    op = create_pad_op(in_shape=[1, 11, 11, 1], out_shape=out_shape, padding=padding, kernel_size=kernel_size)
    assert support.is_operator_supported(op) == expected


pad_avg_pool_test_data = [
    ((3, 3), (1, 1, 1, 1), True),
    ((2, 4), (1, 2, 1, 2), True),
    ((5, 3), (2, 1, 2, 1), True),
    ((5, 3), (0, 1, 2, 1), True),
    ((5, 3), (2, 0, 2, 1), True),
    ((5, 3), (2, 1, 0, 1), True),
    ((5, 3), (2, 1, 2, 0), True),
    ((4, 4), (2, 2, 2, 2), True),
    ((4, 4), (1, 2, 2, 2), False),
    ((4, 4), (2, 1, 2, 2), False),
    ((4, 4), (2, 2, 1, 2), False),
    ((4, 4), (2, 2, 2, 1), False),
]


@pytest.mark.parametrize("k_size, padding, expected", pad_avg_pool_test_data)
def test_pad_followed_by_avg_pool(k_size, padding, expected):
    # Tests PAD followed by AvgPool
    k_w, k_h = k_size
    top, left, bottom, right = padding
    pad_values = [[0, 0], [top, bottom], [left, right], [0, 0]]
    dtype = DataType.int8
    qp = testutil.default_quant_params()
    in_shape = [1, 15, 17, 8]
    out_shape = [1, in_shape[1] + top + bottom, in_shape[2] + left + right, in_shape[3]]
    in0 = Tensor(in_shape, dtype, "in")
    in0.quantization = qp
    pad_tensor = create_const_tensor(
        name="pad", shape=list(np.shape(pad_values)), values=pad_values, dtype=DataType.int32
    )
    out = Tensor(out_shape, dtype, "out")
    out.quantization = qp.clone()
    op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
    pool_out_tens = Tensor(in_shape, dtype, "output")
    pool_out_tens.quantization = qp.clone()
    attrs = {
        "padding": Padding.VALID,
        "ksize": [1, k_w, k_h, 1],
        "stride_w": 1,
        "stride_h": 1,
        "dilation_w_factor": 1,
        "dilation_h_factor": 1,
    }
    pool_op = testutil.create_op(Op.AvgPool, [out], pool_out_tens, attrs)
    pool_op.add_input_tensor(out)
    assert support.is_operator_supported(op) == expected


def create_strided_slice():
    # Creates a valid strided slice operator with some valid inputs/outputs
    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
    op.attrs["begin_mask"] = 1
    op.attrs["end_mask"] = 9
    assert support.is_operator_supported(op)
    return op


def test_constraint_stridedslice_input_count():
    # Wrong number of input tensors
    op = create_strided_slice()
    op.add_input_tensor(op.inputs[0].clone())
    assert not support.is_operator_supported(op)


def test_constraint_stridedslice_inputs_const():
    # begin, end, stride values must not be None
    op = create_strided_slice()
    op.inputs[1].values = None
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[2].values = None
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[3].values = None
    assert not support.is_operator_supported(op)


def test_constraint_stridedslice_stride_values():
    # Unsupported strides
    op = create_strided_slice()
    op.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(op)


def test_constraint_ellipsis_mask():
    # Unsupported ellipsis mask
    op = create_strided_slice()
    op.attrs["ellipsis_mask"] = 1
    assert not support.is_operator_supported(op)


def test_constraint_axis_masks():
    op = create_strided_slice()
    # Setting one of new_axis_mask/shrink_axis_mask to non-zero is ok
    op.attrs["new_axis_mask"] = 2
    assert support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["shrink_axis_mask"] = 3
    assert support.is_operator_supported(op)
    # But setting both to non-zero is not supported
    op.attrs["new_axis_mask"] = 2
    assert not support.is_operator_supported(op)


def test_constraint_slice_ranges():
    # Examples where end offset <= begin offset
    op = create_strided_slice()
    op.inputs[1].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.inputs[2].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["begin_mask"] = 0
    assert not support.is_operator_supported(op)
    op = create_strided_slice()
    op.attrs["end_mask"] = 0
    assert not support.is_operator_supported(op)


def test_constraint_matching_inputs_types():
    # Input data types must match (default is uint8)
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.dtype = DataType.int8
    assert not support.is_operator_supported(op)


def test_constraint_matching_signed():
    # Signed inputs require the output to also be signed
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    op.ofm.dtype = DataType.uint8
    assert not support.is_operator_supported(op)


def test_constraint_unsigned_valid():
    # Unsigned inputs require the output to be either:
    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    # the same (default uint8)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int8
    assert not support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)
    # or int32
    op.ofm.dtype = DataType.int32
    assert support.is_operator_supported(op)


def test_constraint_inputs_int32():
    # Both inputs must be type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ifm2.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_output_int32():
    # Output must be type int32
    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_constraint_matching_quantization_parameters():
    qp = QuantizationParameters()
    qp.scale_f32 = np.float32(1.5)
    qp.zero_point = 128
    # Valid - all matching (uses default quant params)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # Invalid - ifm mismatches ofm
    op.ifm.quantization = qp
    assert not support.is_operator_supported(op)
    # Invalid - ifm2 mismatches ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # Invalid - both ifm and ifm2 mismatch ofm
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
    op.ifm.quantization = qp
    op.ifm2.quantization = qp
    assert not support.is_operator_supported(op)
    # Valid - all matching
    op.ofm.quantization = qp
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], None, [1, 8, 8, 8])
    assert support.is_operator_supported(op)


def test_constraint_elemwise_batch_size():
    # BINARY CASE
    # Batch can be >1 if dims is <=2D
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [2, 2], [2, 2])
    assert support.is_operator_supported(op)
    # For dims >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2], [1, 2, 2], [1, 2, 2])
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # Batch can be >1 if dims is <=2D
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # For dims >2D, batch must be 1
    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2], None, [1, 2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    # invalid case
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_matching_either_shapes():
    # BINARY CASE
    # At least one ifm shape must match ofm's shape
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [4, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [1, 4], [4, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [4, 4], [2, 2])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 4, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 4, 16])
    assert not support.is_operator_supported(op)

    # UNARY CASE
    # No second input so this is treated the same as requiring ifm shape to match ofm shape
    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.CLZ, "op", [4, 4], None, [2, 2], datatype=DataType.int32)
    assert not support.is_operator_supported(op)


def test_constraint_broadcast_shapes():
    # BINARY CASE
    # Only allow broadcast to 1 dim, for 1 rank index
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4], [1, 2, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 1, 4], [1, 2, 4])
    assert support.is_operator_supported(op)
    # Only allow broadcast to 1 dim, for 3 rank indexes
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 1, 1], [1, 4, 8, 16], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 8, 16], [1, 1, 1, 1], [1, 4, 8, 16])
    assert support.is_operator_supported(op)
    # One broadcast dim not 1
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 4], [1, 4, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 4], [1, 2, 4], [1, 4, 4])
    assert not support.is_operator_supported(op)
    # OFM shape dim < largest ifm/ifm2 shape dim
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4], [4, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [1, 4], [1, 4])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 4, 1, 16], [1, 1, 4, 1], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)
    op = testutil.create_elemwise_op(Op.Add, "op", [1, 1, 4, 1], [1, 4, 1, 16], [1, 4, 1, 16])
    assert not support.is_operator_supported(op)


def test_constraint_alpha_valid():
    # Alpha cannot be negative
    op = testutil.create_elemwise_op(Op.LeakyRelu, "op", [2, 2], None, [2, 2])
    op.attrs["alpha"] = 0
    assert support.is_operator_supported(op)
    op.attrs["alpha"] = -1
    assert not support.is_operator_supported(op)


def test_constraint_hardswish_dtype():
    # HardSwish operator dtype should be int8 or uint8, and input dtype must match output
    # UINT8
    op = testutil.create_op_with_quant_tensors(Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8])
    assert support.is_operator_supported(op)
    # INT8
    op = testutil.create_op_with_quant_tensors(Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int8)
    assert support.is_operator_supported(op)

    # Invalid
    op = testutil.create_op_with_quant_tensors(Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int16)
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.uint16)
    assert not support.is_operator_supported(op)
    op = testutil.create_op_with_quant_tensors(Op.HardSwish, [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
    assert not support.is_operator_supported(op)

    in_tens = Tensor([1, 8, 8, 8], DataType.int8, "in")
    out_tens = Tensor([1, 8, 8, 8], DataType.uint8, "out")
    op = testutil.create_op(Op.HardSwish, [in_tens], out_tens)
    assert not support.is_operator_supported(op)


def test_constraint_keep_dims_ifm_ofm():
    # keep_num_dims is not supported for FullyConnected
    op = testutil.create_op_with_quant_tensors(Op.FullyConnected, [4, 8, 8, 4], [32, 32], weights_shape=[4, 8, 8, 4])
    op.attrs["keep_num_dims"] = True
    assert not support.is_operator_supported(op)
    op.attrs["keep_num_dims"] = False
    assert support.is_operator_supported(op)


def create_mean(input_shape, output_shape, indices, datatype, attrs):
    # Creates a Mean op with quantized ifm/ofm and the given reduction indices
    ifm = Tensor(input_shape, datatype, "in")
    ifm.quantization = testutil.default_quant_params()
    indices = create_const_tensor("indices", [len(indices)], DataType.int32, indices, np.uint8)
    ofm = Tensor(output_shape, datatype, "out")
    ofm.quantization = testutil.default_quant_params()
    op = testutil.create_op(Op.Mean, [ifm, indices], ofm, attrs)
    return op


def test_mean_dtype():
    op = create_mean([1, 6, 6, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert support.is_operator_supported(op)
    # The same Mean is rejected with int16 tensors
    op.ifm.dtype = DataType.int16
    op.ofm.dtype = DataType.int16
    assert not support.is_operator_supported(op)


def test_mean_properties():
    op = create_mean([1, 6, 6, 256], [1, 1, 256], [1, 2], DataType.uint8, {})
    assert support.is_operator_supported(op)
    # Changing the IFM zero point makes this Mean unsupported
    op.ifm.quantization.zero_point = 55
    assert not support.is_operator_supported(op)


def test_mean_axis():
    # Reducing over H alone (axis 1) is not supported
    op = create_mean([1, 6, 6, 16], [1, 1, 1, 16], [1], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)


def test_mean_hw_product():
    op = create_mean([1, 64, 64, 16], [1, 1, 16], [1, 2], DataType.uint8, {})
    assert support.is_operator_supported(op)
    op = create_mean([1, 65, 64, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)


def test_mean_hw_product_int8():
    op = create_mean([1, 16, 16, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert support.is_operator_supported(op)
    op = create_mean([1, 16, 17, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert not support.is_operator_supported(op)
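

# Added valid-axis sketch (hypothetical, not part of the original suite):
# reducing over both H and W (axes [1, 2]) mirrors the op that test_mean_dtype
# already asserts is supported.
def test_mean_axis_valid():
    op = create_mean([1, 6, 6, 16], [1, 1, 1, 16], [1, 2], DataType.int8, {"keep_dims": True})
    assert support.is_operator_supported(op)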