#!/usr/bin/env python3
# Copyright (c) 2020-2024, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# ------|------------------|------------------------------------
#  0    | DEBUG            | [Default] Print all messages
#  1    | INFO             | Filter out INFO messages
#  2    | WARNING          | Filter out INFO & WARNING messages
#  3    | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402

from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each op entry is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types': list of TensorFlow types that should be tested for this op
#           OR
#           a dictionary of {'framework_name': [type_list]} for cases where only
#           a subset of the types should be tested in each framework.  This can also
#           be used to restrict an operator to a particular framework.
#
# And optional members:
#      'template': boolean (indicates that this is a templated op which gets further
#                           processing in createDynamicOpLists)
#      'bias': boolean indicating that there is a bias component to be generated
#      'qtypes': List of QuantType quantized types to generate for this op
#      'rank': tuple (lowest rank, highest rank). Dimension range of input tensor.
#      'custom_shapes': List of custom shapes for specific operators

TF_OP_LIST = {
68 "add": {
69 "operands": (2, 0),
70 "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
71 "types": {
72 "tf": TYPE_FI,
73 "tflite": list(
74 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
75 ),
76 },
77 },
78 "sub": {
79 "operands": (2, 0),
80 "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
81 "types": {
82 "tf": TYPE_FI,
83 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
84 # QuantType.ALL_I16 fail in TFLite conversion
85 },
86 },
87 "mul": {
88 "operands": (2, 0),
89 "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
90 "types": {
91 "tf": TYPE_FI,
92 "tflite": list(
93 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
94 ),
95 },
96 },
97 "exp": {
98 "operands": (1, 0),
99 "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
100 "types": TYPE_F,
101 },
102 "rcp": {
103 "operands": (1, 0),
104 "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
105 "types": TYPE_F,
106 },
107 "relu": {
108 "operands": (1, 0),
109 "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
110 "types": {
111 "tf": TYPE_F,
112 "tflite": list(
113 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
114 ),
115 },
116 },
Jerry Ge93912432022-07-22 10:29:13 -0700117 "relu1": {
118 "operands": (1, 0),
119 "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
120 "types": {
121 "tf": [],
122 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
123 },
124 },
Jerry Ge2eea5bf2022-10-11 16:27:05 +0000125 "relu0To1": {
126 "operands": (1, 0),
127 "build_fcn": (TBuilder.Relu0To1, TGen.tgBasic, ArgGen.agNone),
128 "types": {
129 "tf": [],
130 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
131 },
132 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000133 "relu6": {
134 "operands": (1, 0),
135 "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
136 "types": {
137 "tf": TYPE_F,
138 "tflite": list(
139 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
140 ),
141 },
142 },
143 "leaky_relu": {
144 "operands": (1, 0),
145 "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
146 "types": {
147 "tf": TYPE_F,
148 "tflite": list(
149 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
150 ),
151 },
152 },
TatWai Chong41a04fe2022-11-03 21:44:32 +0000153 "prelu": {
154 "operands": (1, 0),
155 "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
156 "types": {
157 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
158 },
159 },
TatWai Chong473eb382022-08-02 04:21:30 +0000160 "gelu": {
161 "operands": (1, 0),
162 "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
163 "types": {
164 # Need compiler support for tf.Erf.
165 # "tf": TYPE_F,
166 "tflite": list(
167 # Only float32, int8 and uint8 supported currently
168 TYPE_F
169 + [QuantType.ALL_U8, QuantType.ALL_I8]
170 ),
171 },
172 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000173 "concat": {
174 "operands": (2, 0),
175 "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
176 "types": TYPE_FI,
Won Jeonf9c0cee2023-09-18 16:32:45 -0700177 "rank": (0, 4),
178 "custom_shapes": {
179 "custom_shape_only": False,
180 "shape_list": [()],
181 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000182 },
183 "bitwise_and": {
184 "operands": (2, 0),
185 "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
186 "types": {"tf": TYPE_I}, # Not supported in TF Lite
187 },
188 "bitwise_or": {
189 "operands": (2, 0),
190 "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
191 "types": {"tf": TYPE_I}, # Not supported in TF Lite
192 },
193 "bitwise_not": {
194 "operands": (1, 0),
195 "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
196 "types": {"tf": TYPE_I}, # Not supported in TF Lite
197 },
198 "bitwise_xor": {
199 "operands": (2, 0),
200 "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
201 "types": {"tf": TYPE_I}, # Not supported in TF Lite
202 },
203 "logical_and": {
204 "operands": (2, 0),
205 "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
206 "types": TYPE_B,
207 },
208 "logical_or": {
209 "operands": (2, 0),
210 "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
211 "types": TYPE_B,
212 },
213 "logical_not": {
214 "operands": (1, 0),
215 "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
216 "types": TYPE_B,
217 },
218 "reduce_any": {
219 "operands": (1, 0),
220 "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
221 "types": TYPE_B,
222 },
223 "reduce_all": {
224 "operands": (1, 0),
225 "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
TatWai Chong6ad6d6e2022-11-11 15:28:50 -0800226 "types": TYPE_B,
Jeremy Johnson015c3552022-02-23 12:15:03 +0000227 },
228 "reduce_min": {
229 "operands": (1, 0),
230 "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
231 "types": {
232 "tf": TYPE_FI,
233 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
234 },
235 },
236 "reduce_max": {
237 "operands": (1, 0),
238 "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
239 "types": {
240 "tf": TYPE_FI,
241 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
242 },
243 },
244 "reduce_sum": {
245 "operands": (1, 0),
246 "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
247 "types": {
248 "tf": TYPE_F,
249 # v2 converter doesn't recognize quantized reduce_sum
250 # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
251 "tflite": TYPE_F,
252 },
253 },
254 "reduce_mean": {
255 "operands": (1, 0),
256 "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
257 "types": {
258 "tf": TYPE_F,
259 "tflite": list(
260 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
261 ),
262 },
263 },
264 "reduce_product": {
265 "operands": (1, 0),
266 "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
267 "types": TYPE_F,
268 },
269 "min": {
270 "operands": (2, 0),
271 "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
272 "types": TYPE_FI,
273 },
274 "max": {
275 "operands": (2, 0),
276 "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
277 "types": TYPE_FI,
278 },
279 "pow": {
280 "operands": (2, 0),
281 "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
282 # Technically, integer is supported, but only for positive exponents.
283 # Needs a random argument generator.
284 "types": TYPE_F,
285 },
286 "abs": {
287 "operands": (1, 0),
288 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
289 "types": TYPE_F,
290 },
291 "ceil": {
292 "operands": (1, 0),
293 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
294 "types": TYPE_F,
295 },
296 "floor": {
297 "operands": (1, 0),
298 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
299 "types": TYPE_F,
300 },
301 "log": {
302 "operands": (1, 0),
303 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
304 "types": TYPE_F,
305 },
306 "negate": {
307 "operands": (1, 0),
308 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
309 "types": TYPE_F,
310 },
311 "rsqrt": {
312 "operands": (1, 0),
Jerry Geb1f25012023-03-03 11:33:51 -0800313 "build_fcn": (TBuilder.Rsqrt, TGen.tgBasicPositive, ArgGen.agNone),
314 "types": {
315 "tf": TYPE_F,
316 "tflite": list(TYPE_F + [QuantType.ALL_I8]),
317 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000318 },
TatWai Chongd713a4d2022-11-10 13:54:28 -0800319 "sign": {
320 "operands": (1, 0),
321 "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
322 "types": {
323 "tf": TYPE_F,
324 },
325 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000326 "sigmoid": {
327 "operands": (1, 0),
328 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
329 "types": {
330 "tf": TYPE_F,
331 "tflite": list(
332 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
333 ),
334 },
335 },
336 "tanh": {
337 "operands": (1, 0),
338 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
339 "types": {
340 "tf": TYPE_F,
341 "tflite": list(
342 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
343 ),
344 },
345 },
Won Jeon78155c62023-06-10 00:20:04 +0000346 "erf": {
347 "operands": (1, 0),
348 "build_fcn": (TBuilder.Erf, TGen.tgBasic, ArgGen.agNone),
349 "types": {
350 "tf": TYPE_F,
351 },
352 },
Luke Hutton41601862022-12-06 17:29:15 +0000353 "sin": {
354 "operands": (1, 0),
355 "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
Luke Hutton657af862022-12-16 16:30:45 +0000356 "types": TYPE_F,
Luke Hutton41601862022-12-06 17:29:15 +0000357 },
358 "cos": {
359 "operands": (1, 0),
360 "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
Luke Hutton657af862022-12-16 16:30:45 +0000361 "types": TYPE_F,
Luke Hutton41601862022-12-06 17:29:15 +0000362 },
Luke Hutton2138a192022-12-15 11:01:39 +0000363 "atan2": {
364 "operands": (2, 0),
365 "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
366 "types": {
367 "tflite": TYPE_F,
368 },
369 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000370 "square": {
371 "operands": (1, 0),
372 "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
373 "types": TYPE_F,
374 },
375 "squared_difference": {
376 "operands": (2, 0),
377 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
Won Jeondd14c1b2023-06-29 23:20:00 +0000378 "types": {
379 "tf": TYPE_F,
380 "tflite": list(TYPE_FI + [QuantType.ALL_I8]),
381 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000382 },
383 "equal": {
384 "operands": (2, 0),
385 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
386 "types": TYPE_FI,
387 },
388 "greater_equal": {
389 "operands": (2, 0),
390 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
391 "types": TYPE_FI,
392 },
393 "greater": {
394 "operands": (2, 0),
395 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
396 "types": TYPE_FI,
397 },
398 "less": {
399 "operands": (2, 0),
400 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
401 "types": TYPE_FI,
402 },
403 "less_equal": {
404 "operands": (2, 0),
405 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
406 "types": TYPE_FI,
407 },
408 "conv2d_TEMPLATE": {
409 "operands": (1, 1),
410 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
411 "types": {
412 "tf": [tf.float32],
413 "tflite": [
414 tf.float32,
415 QuantType.CONV_U8_U8,
416 QuantType.CONV_I8_I8,
417 QuantType.CONV_I16_I8,
418 ],
419 },
420 "template": True,
421 },
422 "conv2d_relu_TEMPLATE": {
423 "operands": (1, 2),
424 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
425 "types": {
426 "tf": [tf.float32],
427 "tflite": [
428 tf.float32,
429 QuantType.CONV_U8_U8,
430 QuantType.CONV_I8_I8,
431 QuantType.CONV_I16_I8,
432 ],
433 },
434 "template": True,
435 },
436 "conv2d_relu6_TEMPLATE": {
437 "operands": (1, 2),
438 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
439 "types": {
440 "tf": [tf.float32],
441 "tflite": [
442 tf.float32,
443 QuantType.CONV_U8_U8,
444 QuantType.CONV_I8_I8,
445 QuantType.CONV_I16_I8,
446 ],
447 },
448 "template": True,
449 },
450 "conv2d_relu_n1_to_1_TEMPLATE": {
451 "operands": (1, 2),
452 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
453 "types": {
454 "tf": [tf.float32],
455 "tflite": [
456 tf.float32,
457 QuantType.CONV_U8_U8,
458 QuantType.CONV_I8_I8,
459 QuantType.CONV_I16_I8,
460 ],
461 },
462 "template": True,
463 },
464 # This test is converted as:
465 # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
466 # TODO: anyway to generate tfl.conv2d(){fused_activation_function="TANH"}?
467 "conv2d_tanh_TEMPLATE": {
468 "operands": (1, 2),
469 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
470 "types": {
471 "tf": [tf.float32],
472 "tflite": [
473 tf.float32,
474 QuantType.CONV_U8_U8,
475 QuantType.CONV_I8_I8,
476 QuantType.CONV_I16_I8,
477 ],
478 },
479 "template": True,
480 },
481 "conv2d_bias_TEMPLATE": {
482 "operands": (1, 2),
483 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
484 "types": {
485 "tf": [tf.float32],
486 "tflite": [
487 tf.float32,
488 QuantType.CONV_U8_U8,
489 QuantType.CONV_I8_I8,
490 QuantType.CONV_I16_I8,
491 ],
492 },
493 "bias": True,
494 "template": True,
495 },
TatWai Chongfd629052022-07-25 04:01:58 +0000496 "conv3d_TEMPLATE": {
497 "operands": (1, 1),
498 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
499 "types": {
500 "tf": [tf.float32],
501 "tflite": [
502 tf.float32,
503 QuantType.CONV_U8_U8,
504 QuantType.CONV_I8_I8,
505 # Quantization to 16x8-bit not yet supported by tflite.
506 ],
507 },
508 "template": True,
509 "rank": (1, 5),
510 },
511 "conv3d_bias_TEMPLATE": {
512 "operands": (1, 2),
513 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
514 "types": {
515 "tf": [tf.float32],
516 "tflite": [
517 tf.float32,
518 QuantType.CONV_U8_U8,
519 QuantType.CONV_I8_I8,
520 # Quantization to 16x8-bit not yet supported by tflite.
521 ],
522 },
523 "bias": True,
524 "template": True,
525 "rank": (1, 5),
526 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000527 "depthwise_conv2d_TEMPLATE": {
528 "operands": (1, 1),
529 "build_fcn": (
530 TBuilder.DepthwiseConv2d,
531 TGen.tgDepthwiseConv2d,
532 ArgGen.agDepthwiseConv2d,
533 ),
534 "types": {
535 "tf": [tf.float32],
536 "tflite": [
537 tf.float32,
538 QuantType.CONV_U8_U8,
539 QuantType.CONV_I8_I8,
540 QuantType.CONV_I16_I8,
541 ],
542 },
543 "template": True,
544 },
545 "depthwise_conv2d_bias_TEMPLATE": {
546 "operands": (1, 2),
547 "build_fcn": (
548 TBuilder.DepthwiseConv2dWithBias,
549 TGen.tgDepthwiseConv2d,
550 ArgGen.agDepthwiseConv2d,
551 ),
552 "types": {
553 "tf": [tf.float32],
554 "tflite": [
555 tf.float32,
556 QuantType.CONV_U8_U8,
557 QuantType.CONV_I8_I8,
558 QuantType.CONV_I16_I8,
559 ],
560 },
561 "bias": True,
562 "template": True,
563 },
564 "transpose_conv2d_TEMPLATE": {
565 "operands": (1, 1),
566 "build_fcn": (
567 TBuilder.TransposeConv2d,
568 TGen.tgTransposeConv2d,
569 ArgGen.agTransposeConv2d,
570 ),
571 "types": {
572 "tf": [tf.float32],
573 "tflite": [
574 tf.float32,
575 QuantType.CONV_U8_U8,
576 QuantType.CONV_I8_I8,
577 QuantType.CONV_I16_I8,
578 ],
579 },
580 "template": True,
581 },
582 "argmax": {
583 "operands": (1, 0),
584 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
585 "types": {"tf": TYPE_F},
586 },
587 "avg_pool2d": {
588 "operands": (1, 0),
589 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
590 "types": {
591 "tf": TYPE_F,
592 "tflite": list(
593 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
594 ),
595 },
596 },
597 "max_pool2d": {
598 "operands": (1, 0),
599 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
600 "types": {
601 "tf": TYPE_F,
602 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
603 # ALL_I16 not supported yet
604 # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
605 # QI16 is missing from MaxPoolOperandAndResultConstraints
606 # If adding QI16 back this test can run through.
607 },
608 },
609 "reshape": {
610 "operands": (1, 0),
611 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
612 "types": TYPE_FI,
613 },
614 "transpose": {
615 "operands": (1, 0),
616 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
617 "types": TYPE_FI,
618 },
619 "slice": {
620 "operands": (1, 0),
621 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
622 "types": TYPE_FI,
623 },
624 "strided_slice": {
625 "operands": (1, 0),
626 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
627 "types": TYPE_FI,
628 },
629 "select": {
630 "operands": (3, 0),
631 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
632 "types": TYPE_FI,
633 },
634 "addn": {
635 "operands": (4, 0),
636 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
637 "types": TYPE_FI,
638 },
639 "concatv2": {
640 "operands": (4, 0),
641 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
642 "types": TYPE_FI,
Won Jeonf9c0cee2023-09-18 16:32:45 -0700643 "rank": (0, 4),
644 "custom_shapes": {
645 "custom_shape_only": False,
646 "shape_list": [()],
647 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000648 },
649 "stack": {
650 "operands": (4, 0),
651 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
652 "types": TYPE_FI,
653 },
654 "unstack": {
655 "operands": (1, 0),
656 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
657 "types": TYPE_F,
658 },
TatWai Chongf7008da2022-09-09 09:35:40 +0000659 "mirrorpad": {
660 "operands": (1, 0),
661 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
662 "types": TYPE_FI,
663 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000664 "pad": {
665 "operands": (1, 0),
666 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
TatWai Chong2226f902023-02-22 18:38:01 -0800667 "types": {
668 "tf": TYPE_F,
669 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
670 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000671 },
672 "expand_dims": {
673 "operands": (1, 0),
674 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
675 "types": TYPE_FI,
676 },
677 "shape": {
678 "operands": (1, 0),
679 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
680 "types": TYPE_FI,
681 },
682 "rank": {
683 "operands": (1, 0),
684 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
685 "types": TYPE_FI,
686 },
687 "fill": {
688 "operands": (1, 0),
689 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
690 "types": TYPE_FI,
691 },
692 "elu": {
693 "operands": (1, 0),
694 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
695 "types": TYPE_F,
696 },
697 "softmax": {
698 "operands": (1, 0),
699 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
700 "types": {
701 "tf": TYPE_F,
702 "tflite": list(
703 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
704 ),
705 },
706 },
707 "log_softmax": {
708 "operands": (1, 0),
709 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
710 "types": TYPE_F,
711 },
Jerry Ge28811d92023-12-05 00:53:26 +0000712 "dynamic_linear": {
713 "operands": (1, 0),
714 "build_fcn": (TBuilder.DynamicLinear, TGen.tgBasic, ArgGen.agNone),
715 "types": {
716 "tf": [],
717 "tflite": list(TYPE_F),
718 },
719 "custom_shapes": {
720 "custom_shape_only": True,
721 "shape_list": [(14, 19)],
722 },
        # Number of tuples equals the number of inputs; each tuple specifies
        # which dims of that input to set to None (dynamic).
        # In this case there is 1 input, so there is 1 tuple, and it sets the
        # first input's first dim to None.
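        # e.g. the (14, 19) input above is declared to the converter with a
        # signature shape of (None, 19).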
726 "dynamic_shape_dim": [
727 (0,),
728 ],
729 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000730 "matmul": {
731 "operands": (2, 0),
732 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
733 "types": {
734 "tf": TYPE_F,
735 "tflite": list(
736 TYPE_F
737 + [QuantType.ALL_U8, QuantType.ALL_I8]
738 # 16 bits matmul fail to convert
739 ),
740 },
741 },
742 "add_scalar": {
743 "operands": (1, 0),
744 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
745 "types": TYPE_F,
746 },
747 "add_1d": {
748 "operands": (2, 0),
749 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
750 "types": TYPE_F,
751 },
752 "split": {
753 "operands": (1, 0),
754 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
755 "types": TYPE_FI,
756 },
757 "tile": {
758 "operands": (1, 0),
759 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
760 "types": TYPE_FI,
761 },
762 "reverse": {
763 "operands": (1, 0),
764 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
765 "types": {"tf": TYPE_FI},
766 },
767 "gather": {
768 "operands": (1, 0),
769 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
770 "types": TYPE_FI,
771 },
772 "gather_nd": {
773 "operands": (1, 0),
774 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
775 "types": TYPE_FI,
776 },
777 "scatter_nd": {
778 "operands": (1, 0),
779 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
780 "types": TYPE_FI,
781 },
782 "space_to_batch": {
783 "operands": (1, 0),
784 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
785 "types": TYPE_F,
786 },
TatWai Chongbef907a2024-01-23 09:40:37 -0800787 "dynamic_space_to_batch": {
788 "operands": (1, 0),
789 "build_fcn": (
790 TBuilder.DynamicSpaceToBatch,
791 TGen.tgBasic,
792 ArgGen.agSpaceToBatch,
793 ),
794 "types": TYPE_F,
795 "custom_shapes": {
796 "custom_shape_only": True,
797 "shape_list": [(13, 21, 3)],
798 },
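        # Both the first and second dims of the (13, 21, 3) input are set to
        # None, i.e. the signature shape becomes (None, None, 3).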
799 "dynamic_shape_dim": [
800 (
801 0,
802 1,
803 ),
804 ],
805 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000806 "batch_to_space": {
807 "operands": (1, 0),
808 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
809 "types": TYPE_F,
810 },
Jerry Ge28811d92023-12-05 00:53:26 +0000811 "dynamic_batch_to_space": {
812 "operands": (1, 0),
813 "build_fcn": (
814 TBuilder.DynamicBatchToSpace,
815 TGen.tgBasic,
816 ArgGen.agBatchToSpace,
817 ),
818 "types": TYPE_F,
819 "custom_shapes": {
820 "custom_shape_only": True,
821 "shape_list": [(8, 4, 4, 4)],
822 },
        # Number of tuples equals the number of inputs; each tuple specifies
        # which dims of that input to set to None (dynamic).
        # In this case there is 1 input, so there is 1 tuple, and it sets the
        # first input's 0th dim to None.
826 "dynamic_shape_dim": [
827 (0,),
828 ],
829 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000830 "space_to_depth": {
831 "operands": (1, 0),
832 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
833 "types": TYPE_F,
834 },
Jerry Ge28811d92023-12-05 00:53:26 +0000835 "dynamic_space_to_depth": {
836 "operands": (1, 0),
837 "build_fcn": (TBuilder.DynamicSpaceToDepth, TGen.tgBasic, ArgGen.agNone),
838 "types": {
839 "tf": [],
840 "tflite": list(TYPE_F),
841 },
842 "custom_shapes": {
843 "custom_shape_only": True,
844 "shape_list": [(1, 32, 32, 8)],
845 },
        # Number of tuples equals the number of inputs; each tuple specifies
        # which dims of that input to set to None (dynamic).
        # In this case there is 1 input, so there is 1 tuple, and it sets the
        # first input's first (batch) dim to None.
Jerry Ge28811d92023-12-05 00:53:26 +0000849 "dynamic_shape_dim": [
TatWai Chong6a46b252024-01-12 13:13:22 -0800850 (0,),
Jerry Ge28811d92023-12-05 00:53:26 +0000851 ],
852 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000853 "depth_to_space": {
854 "operands": (1, 0),
855 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
856 "types": TYPE_F,
857 },
Jerry Ge28811d92023-12-05 00:53:26 +0000858 "dynamic_depth_to_space": {
859 "operands": (1, 0),
860 "build_fcn": (TBuilder.DynamicDepthToSpace, TGen.tgBasic, ArgGen.agNone),
861 "types": {
862 "tf": [],
863 "tflite": list(TYPE_F),
864 },
865 "custom_shapes": {
866 "custom_shape_only": True,
867 "shape_list": [(1, 1, 1, 4)],
868 },
        # Number of tuples equals the number of inputs; each tuple specifies
        # which dims of that input to set to None (dynamic).
        # In this case there is 1 input, so there is 1 tuple, and it sets the
        # first input's first (batch) dim to None.
Jerry Ge28811d92023-12-05 00:53:26 +0000872 "dynamic_shape_dim": [
TatWai Chong6a46b252024-01-12 13:13:22 -0800873 (0,),
Jerry Ge28811d92023-12-05 00:53:26 +0000874 ],
875 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000876 "one_hot": {
877 "operands": (3, 1),
878 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
879 "types": TYPE_FI,
880 },
881 "fakequant": {
882 "operands": (1, 0),
883 "build_fcn": (
884 TBuilder.Fakequant,
885 TGen.tgBasic,
886 ArgGen.agFakequant,
887 ),
888 "types": {"tf": TYPE_F},
889 },
TatWai Chong0cef07e2023-02-27 13:22:52 -0800890 "resize": {
Jeremy Johnson015c3552022-02-23 12:15:03 +0000891 "operands": (1, 0),
TatWai Chong0cef07e2023-02-27 13:22:52 -0800892 "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
TatWai Chongf7326092022-06-08 12:17:14 -0700893 "types": {
894 "tf": TYPE_F,
895 "tflite": list(
896 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
897 ),
898 },
Jerry Ge5dd5a552023-05-23 22:41:20 +0000899 "custom_shapes": {
900 "custom_shape_only": False,
901 "shape_list": [(3, 1, 1, 7)],
902 },
TatWai Chongf7326092022-06-08 12:17:14 -0700903 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000904 "left_shift": {
905 "operands": (1, 0),
906 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
907 "types": {"tf": [tf.int32]},
908 },
909 "right_shift": {
910 "operands": (1, 0),
911 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
912 "types": {
913 "tf": [
914 tf.int32,
915 ]
916 },
917 },
Jerry Ge9e94af82022-10-27 09:57:00 -0700918 "while": {
919 "operands": (1, 0),
920 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
921 "types": {
922 "tflite": list(TYPE_F),
923 },
924 },
925 "lstm": {
926 "operands": (1, 0),
927 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
928 "types": {
929 "tflite": [
930 tf.float32,
931 # tf.int32
932 ]
933 },
934 },
Tai Lycf84bc92023-09-07 20:49:09 +0000935 "lstm_stateful": {
936 "operands": (1, 0),
937 "build_fcn": (TBuilder.SLSTM, TGen.tgRecurrent, ArgGen.agNone),
938 "types": {
939 "tflite": [
940 tf.float32,
941 ]
942 },
943 },
Jerry Ge9e94af82022-10-27 09:57:00 -0700944 "gru": {
945 "operands": (1, 0),
946 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
947 "types": {
948 "tflite": [
949 tf.float32,
950 # tf.int32
951 ]
952 },
953 },
954 "rnn": {
955 "operands": (1, 0),
956 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
957 "types": {
958 "tflite": [
959 tf.float32,
960 ]
961 },
962 },
Tai Lycf84bc92023-09-07 20:49:09 +0000963 "callonce": {
964 "operands": (1, 0),
965 "build_fcn": (TBuilder.CallOnce, TGen.tgBasic, ArgGen.agNone),
966 "types": {
967 "tflite": [tf.float32],
968 },
969 "custom_shapes": {
970 "custom_shape_only": True,
971 "shape_list": [(1,)],
972 },
973 },
Luke Hutton261b7b62023-01-10 14:50:31 +0000974 "rfft2d": {
975 "operands": (1, 0),
976 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
977 "types": {
978 "tflite": TYPE_F,
979 },
980 },
Luke Hutton714aa602023-02-08 19:45:26 +0000981 "real": {
982 "operands": (1, 0),
983 "build_fcn": (TBuilder.Real, TGen.tgComplexComponents, ArgGen.agNone),
984 "types": {
985 "tflite": [tf.complex64],
986 },
987 },
988 "imag": {
989 "operands": (1, 0),
990 "build_fcn": (TBuilder.Imag, TGen.tgComplexComponents, ArgGen.agNone),
991 "types": {
992 "tflite": [tf.complex64],
993 },
994 },
Tai Lyfe36fa92023-06-01 21:45:12 +0000995 "broadcastto": {
996 "operands": (1, 1),
997 "build_fcn": (TBuilder.BroadcastTo, TGen.tgBroadcastTo, ArgGen.agNone),
998 "types": {
999 "tf": TYPE_FIB,
1000 },
1001 },
Jeremy Johnson015c3552022-02-23 12:15:03 +00001002}
1003
1004# Shapes to be tested; default can be overwritten
1005shape_list = [
1006 (1,),
1007 (64,),
1008 (14, 19),
1009 (13, 21, 3),
Luke Hutton261b7b62023-01-10 14:50:31 +00001010 (1, 8, 16),
Jeremy Johnson015c3552022-02-23 12:15:03 +00001011 (1, 4, 4, 4),
1012 (1, 8, 4, 17),
1013 (1, 4, 8, 19),
1014 (1, 32, 32, 8),
1015 (1, 7, 7, 9),
TatWai Chong0cef07e2023-02-27 13:22:52 -08001016 (3, 1, 1, 7),
TatWai Chongfd629052022-07-25 04:01:58 +00001017 (2, 2, 7, 7, 2),
1018 (1, 4, 8, 21, 17),
1019 (3, 32, 16, 16, 5),
Jeremy Johnson015c3552022-02-23 12:15:03 +00001020]
1021
1022
1023def gen_rand_shapes(args):
1024 """Overwrite the global shape list with a new list of random shapes"""
1025 global shape_list
1026
1027 rng = np.random.default_rng(args.random_seed)
1028
1029 # Don't let things get too big... cap the maximum volume, but let
1030 # an individual dimension be 1..47
1031 max_total_volume = 32 * 32 * 4
1032
1033 shape_list = []
TatWai Chongfd629052022-07-25 04:01:58 +00001034 # Only iterate over ranks 2, 3, 4, and 5
1035 for rank in range(2, 6):
Jeremy Johnson015c3552022-02-23 12:15:03 +00001036 for n in range(args.random_shapes):
1037 new_shape = rng.integers(1, 48, size=rank)
1038
TatWai Chongfd629052022-07-25 04:01:58 +00001039 # Set the batch dimension on 4D or 5D objects to 1
1040 if rank == 4 or rank == 5:
Jeremy Johnson015c3552022-02-23 12:15:03 +00001041 new_shape[0] = 1
1042
1043 # Limit the total shape volume and throw out any
1044 # shapes that wouldn't leave at least size=2 in some non-batch dimension
1045 volume = 1
1046 skip_shape = False
1047 for i in range(rank):
1048
1049 volume *= new_shape[i]
1050
1051 # Reduce the shape, while it's larger than the maximum volume
1052 while volume > max_total_volume:
1053 new_shape[i] = new_shape[i] // 2
1054 volume = volume // 2
1055
1056 # Now an untenable dimension size? Skip this one.
1057 if new_shape[i] < 1:
1058 skip_shape = True
1059
1060 if not skip_shape:
1061 shape_list.append(tuple(new_shape))
1062
1063
# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to .tflite if it's a quantized unit test.
1066def run_unit_test(
1067 op_name,
1068 args,
1069 test_dir,
1070 curr_shape,
1071 addl_args,
1072 dtype,
1073 excluded_framework_list,
1074 quantized_inference_dtype,
1075 result_name,
1076 seed,
1077):
1078
1079 try:
1080 op = TF_OP_LIST[op_name]
1081 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1082
1083 # Get and seed a random number generator for this test
1084 rng = np.random.default_rng(seed)
1085
        # tensor_gen_fcn returns:
        #   placeholders = [(name: str, value: np.array), ...]
        #   consts = [(name: str, value: np.array), ...]
Won Jeone2325d12023-06-10 15:25:54 +00001088 placeholders, consts = (
Won Jeon6c93f412023-07-08 07:04:08 +00001089 tensor_gen_fcn(op, curr_shape, dtype, rng, False)
Won Jeone2325d12023-06-10 15:25:54 +00001090 if tensor_gen_fcn.__name__ == "tgBFuzz"
1091 else tensor_gen_fcn(op, curr_shape, dtype, rng)
1092 )
Jeremy Johnson015c3552022-02-23 12:15:03 +00001093
        # If the test doesn't have any placeholders/consts, terminate early
1095 if len(placeholders) == 0 and len(consts) == 0:
1096 return True
1097
1098 if not args.quiet:
1099 print(" {} ".format(test_dir))
1100
1101 try:
1102 os.mkdir(test_dir)
1103 except FileExistsError:
1104 pass
1105
1106 const_nodes = [value for name, value in consts]
1107
1108 num_placeholders = len(placeholders)
        # If the test is quantized, create tensor quantization metadata for
        # each input tensor, based on the quantized type
1111 if quantized_inference_dtype:
1112 is_quantized = True
1113 # TODO: support INT8 IFM x INT4 weight later
1114 if quantized_inference_dtype == QuantType.ALL_U8:
1115 qzero = [128] * num_placeholders
1116 numpy_dtype = [np.uint8] * num_placeholders
1117 tflite_inference_dtype = tf.uint8
1118 elif quantized_inference_dtype == QuantType.ALL_I8:
1119 qzero = [0] * num_placeholders
1120 numpy_dtype = [np.int8] * num_placeholders
1121 tflite_inference_dtype = tf.int8
1122 elif quantized_inference_dtype == QuantType.ALL_I16:
1123 qzero = [0] * num_placeholders
1124 numpy_dtype = [np.int16] * num_placeholders
1125 tflite_inference_dtype = tf.int16
1126 elif quantized_inference_dtype == QuantType.CONV_U8_U8:
1127 assert (
1128 num_placeholders == 1
1129 ), "Unsupported number of placeholders for Convolution: {}".format(
1130 num_placeholders
1131 )
1132 qzero = [128] * num_placeholders
1133 if num_placeholders == 2:
1134 numpy_dtype = [np.uint8, np.uint8]
1135 else:
1136 numpy_dtype = [np.uint8, np.uint8, np.int32]
1137 tflite_inference_dtype = tf.uint8
1138 elif quantized_inference_dtype == QuantType.CONV_I8_I8:
1139 assert (
1140 num_placeholders == 1
1141 ), "Unsupported number of placeholders for Convolution: {}".format(
1142 num_placeholders
1143 )
1144 qzero = [0] * num_placeholders
1145 if num_placeholders == 2:
1146 numpy_dtype = [np.int8, np.int8]
1147 else:
1148 numpy_dtype = [np.int8, np.int8, np.int32]
1149 tflite_inference_dtype = tf.int8
1150 elif quantized_inference_dtype == QuantType.CONV_I16_I8:
1151 assert (
1152 num_placeholders == 1
1153 ), "Unsupported number of placeholders for Convolution: {}".format(
1154 num_placeholders
1155 )
1156 if num_placeholders == 2:
1157 qzero = [0, 0]
1158 numpy_dtype = [np.int16, np.int8]
1159 else:
1160 qzero = [0, 0, 0]
1161 numpy_dtype = [
1162 np.int16,
1163 np.int8,
1164 np.int64,
1165 ] # np.int64 to represent 40 bits accumulator
1166 tflite_inference_dtype = tf.int16
1167 else:
1168 raise Exception(
1169 "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
1170 )
1171
1172 else:
1173 is_quantized = False
1174
1175 tf_model_filename = None
1176 tf_result_npy_filename = None
1177 tf_result_name = None
1178
1179 tflite_model_filename = None
1180 tflite_result_npy_filename = None
1181 tflite_result_name = None
1182
1183 placeholder_names = []
1184 placeholder_vals = []
1185 placeholder_signatures = ()
1186 placeholder_npy_filenames = []
1187 placeholder_shapes = []
TatWai Chong6a46b252024-01-12 13:13:22 -08001188 placeholder_dynamic = False
Jeremy Johnson015c3552022-02-23 12:15:03 +00001189
1190 for idx, (name, val) in enumerate(placeholders):
Jerry Ge54bb61e2023-12-20 22:21:24 +00001191 input_shape = tuple(val.shape)
1192
Jerry Ge28811d92023-12-05 00:53:26 +00001193 try:
1194 dynamic_shape_dim_tuples = op["dynamic_shape_dim"]
1195 dim_tuple = dynamic_shape_dim_tuples[idx]
Jerry Ge54bb61e2023-12-20 22:21:24 +00001196 input_shape = list(input_shape)
TatWai Chongbef907a2024-01-23 09:40:37 -08001197
                # Set the input dimensions listed in the op's "dynamic_shape_dim"
                # entry to unknown (None).
1199 for dim in dim_tuple:
1200 input_shape[dim] = None
1201
TatWai Chong6a46b252024-01-12 13:13:22 -08001202 # When any dimension size is unknown, mark the placeholder as dynamic type.
1203 placeholder_dynamic = True
Jerry Ge28811d92023-12-05 00:53:26 +00001204
Jerry Ge54bb61e2023-12-20 22:21:24 +00001205 addl_args.append(tuple(input_shape))
Jerry Ge28811d92023-12-05 00:53:26 +00001206 except KeyError:
1207 pass
1208
Jeremy Johnson015c3552022-02-23 12:15:03 +00001209 placeholder_names.append(name)
1210 placeholder_signatures = placeholder_signatures + (
Jerry Ge54bb61e2023-12-20 22:21:24 +00001211 tf.TensorSpec(shape=input_shape, dtype=val.dtype, name=name),
Jeremy Johnson015c3552022-02-23 12:15:03 +00001212 )
1213 placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
1214 placeholder_shapes.append(val.shape)
1215
1216 # Get test builder class
1217 fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
1218 concrete_function = tf.function(input_signature=placeholder_signatures)(
1219 fcn_node.eval
1220 ).get_concrete_function()
1221
1222 if is_quantized:
1223
1224 assert dtype is tf.float32, "quantized test must come from float32 graph"
1225
            # 1. Quantize the float placeholder arrays to feed the graph
1227 for idx, (name, val) in enumerate(placeholders):
1228
1229 # we use np.amin()/np.amax() to determine dynamic range
1230 # for quantized test
1231 zeropoint = 0
1232 scale = 1.0
1233 if numpy_dtype[idx] != np.int64:
1234 qmin = np.iinfo(numpy_dtype[idx]).min
1235 qmax = np.iinfo(numpy_dtype[idx]).max
1236 num_bits = np.iinfo(numpy_dtype[idx]).bits
1237 # 40 bit is represented as np.int64
1238 else:
1239 num_bits = 40
1240 qmin = -(1 << num_bits)
1241 qmax = (1 << num_bits) - 1
1242
1243 min_val = np.amin(val)
1244 max_val = np.amax(val)
1245
1246 # for single value tensor, we set scale equal to the abs(value),
1247 # and fix zeropoint to 128
1248 # if val > 0, it'll be represented as 129,
1249 # where val = (129 - 128) * val
1250 # if val < 0, it'll be represented as 127,
1251 # where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
1253 # and let quantized 1 represent the value
1254 # also adjust effective min/max consequently
1255 if max_val == min_val:
1256 if max_val != 0:
1257 scale = abs(max_val)
1258 else:
1259 scale = 1.0
1260 min_val = float(qmin - qzero[idx]) * scale
1261 max_val = float(qmax - qzero[idx]) * scale
1262 else:
1263 scale = (max_val - min_val) / float(qmax - qmin)
Won Jeon6c93f412023-07-08 07:04:08 +00001264 if op_name == "squared_difference":
1265 zeropoint = -int(round((-min_val) / scale)) + qmin
1266 else:
1267 zeropoint = int(round((-min_val) / scale)) + qmin
Jeremy Johnson015c3552022-02-23 12:15:03 +00001268
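                # Illustrative example (not from the original source): for int8
                # (qmin=-128, qmax=127) and values in [-1.0, 3.0], scale is
                # 4.0 / 255 ~= 0.0157 and zeropoint is round(1.0 / 0.0157) - 128 = -64.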
                # run through tf.fakequant first to ensure the quantization error is aligned
1270 fakequant_val = tf.quantization.fake_quant_with_min_max_args(
1271 val,
1272 min=min_val,
1273 max=max_val,
1274 num_bits=num_bits,
1275 name="gen_quant_npy",
1276 )
1277
Jerry Ged69e2832023-07-05 21:54:07 +00001278 quant_val = np.round(fakequant_val / scale) + zeropoint
Jeremy Johnson015c3552022-02-23 12:15:03 +00001279
                # A few unit tests generated after the May 2020 TF hash produce
                # quantized values that exceed the [0, 255] range for some
                # reason, so clip to [qmin, qmax] before saving.
1282 saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])
1283
Jeremy Johnson015c3552022-02-23 12:15:03 +00001284 np.save(
1285 os.path.join(test_dir, placeholder_npy_filenames[idx]),
Jerry Ged69e2832023-07-05 21:54:07 +00001286 saved_val,
Jeremy Johnson015c3552022-02-23 12:15:03 +00001287 False,
1288 )
1289
1290 placeholder_vals.append(tf.convert_to_tensor(saved_val))
1291
1292 # 2. Convert the model to quantized TFLite flatbuffer
1293 module = tf.Module()
1294 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1295 [concrete_function], module
1296 )
1297 converter.optimizations = [tf.lite.Optimize.DEFAULT]
1298 converter.experimental_new_converter = True
1299
1300 # use MLIR-based post-quantizer
1301 converter.experimental_new_quantizer = True
1302
1303 flag = (
1304 tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 # noqa: E501
1305 )
1306 if tflite_inference_dtype == tf.int16:
1307 converter.target_spec.supported_ops = [flag]
1308
            # Generator function for integer quantization of TFLiteConverter,
            # which generates a few hundred input samples with the same order,
            # type, and shape as the inputs, to calibrate/estimate the range of
            # the floating-point inputs.
            # For broadcast fuzzing tests, fuzzing needs to be disabled;
            # otherwise it causes a mismatch of input tensor shapes.
Jeremy Johnson015c3552022-02-23 12:15:03 +00001314 def input_stats():
1315 for i in range(0, args.num_samples):
Won Jeone2325d12023-06-10 15:25:54 +00001316 placeholders, _ = (
Won Jeon6c93f412023-07-08 07:04:08 +00001317 tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng, True)
                        if tensor_gen_fcn.__name__ == "tgBFuzz"
1319 else tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng)
1320 )
1321 yield [s[1] for s in placeholders]
Jeremy Johnson015c3552022-02-23 12:15:03 +00001322
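            # The converter pulls batches from input_stats() during post-training
            # calibration; args.num_samples (default 200) controls how many.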
1323 converter.representative_dataset = input_stats
1324 converter.inference_input_type = tflite_inference_dtype
1325 converter.inference_output_type = tflite_inference_dtype
1326
1327 tflite_model = converter.convert()
1328
1329 tflite_model_filename = "model.tflite"
1330
1331 # Write out converted model to disk
1332 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1333 f.write(tflite_model)
1334
1335 else: # is_quantized is False
1336
            # 1. Save out the numpy arrays directly
1338 for idx, (name, val) in enumerate(placeholders):
1339 placeholder_vals.append(tf.convert_to_tensor(val))
Luke Hutton714aa602023-02-08 19:45:26 +00001340
                # Complex tensors are expected to be represented by a
                # single floating point tensor of shape [?, ..., ?, 2].
1343 if val.dtype == np.complex64:
1344 val_shape = val.shape + (2,)
1345 val = val.view(np.float32)
1346 val = val.reshape(val_shape)
1347
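                # e.g. a (1, 8, 16) complex64 tensor is saved as a
                # (1, 8, 16, 2) float32 array.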
Jeremy Johnson015c3552022-02-23 12:15:03 +00001348 np.save(
1349 os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
1350 )
1351
            # 2.a Save out .pb if the framework list includes tensorflow
1353 if "tf" not in excluded_framework_list:
1354 # Write out graph as protobuf to disk
1355 tf_model_filename = "model.pb"
1356 tf.io.write_graph(
1357 concrete_function.graph, test_dir, tf_model_filename, True
1358 )
1359
            # 2.b Save out .tflite if the framework list includes tflite
1361 if "tflite" not in excluded_framework_list:
1362 # Convert the model to TFLite flatbuffer
1363 module = tf.Module()
Tai Lycf84bc92023-09-07 20:49:09 +00001364
1365 if op_name == "callonce" or op_name == "lstm_stateful":
1366 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1367 [concrete_function], fcn_node
1368 )
1369 else:
1370 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1371 [concrete_function], module
1372 )
Jeremy Johnson015c3552022-02-23 12:15:03 +00001373
1374 converter.experimental_new_converter = True
1375
                # Even if it's a non-quantized int32 test, this needs to be set to tf.float32
1377 converter.inference_input_type = tf.float32
1378 converter.inference_output_type = tf.float32
1379 tflite_model = converter.convert()
1380
1381 # Write out converted model to disk
1382 tflite_model_filename = "model.tflite"
1383 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1384 f.write(tflite_model)
1385
1386 # Get TF reference result if .pb is specified
1387 if tf_model_filename:
1388 tf_result_npy_filename = "tf_result.npy"
1389 tf_result = concrete_function(*placeholder_vals)
1390 np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)
1391
1392 tf_result_name = result_name
1393
1394 # Get TFLite inference result if .tflite is specified
1395 if tflite_model_filename:
1396 tflite_result_npy_filename = "tflite_result.npy"
1397
Luke Hutton5c844212023-01-27 14:17:52 +00001398 ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]
Jeremy Johnson015c3552022-02-23 12:15:03 +00001399
1400 if args.tflite_kernel_mode == "optimized" or (
1401 op_name in ops_with_optimized_only_kernel
1402 ):
1403 interpreter = tf.lite.Interpreter(
1404 model_path=os.path.join(test_dir, tflite_model_filename)
1405 )
1406 elif args.tflite_kernel_mode == "reference":
1407 interpreter = tf.lite.Interpreter(
1408 model_path=os.path.join(test_dir, tflite_model_filename),
1409 experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
1410 )
1411 else:
1412 assert 0, "unknown tflite interpreter mode {}".format(
1413 args.tflite_kernel_mode
1414 )
Jeremy Johnson015c3552022-02-23 12:15:03 +00001415
1416 input_details = interpreter.get_input_details()
1417 output_details = interpreter.get_output_details()
1418
Jerry Ge28811d92023-12-05 00:53:26 +00001419 # Prototype dynamic_shape testing
1420 # Need to resize the input tensors to known shapes when evaluating
1421 for idx, val in enumerate(placeholder_vals):
1422 interpreter.resize_tensor_input(
1423 input_details[idx]["index"], placeholder_shapes[idx]
1424 )
1425 interpreter.allocate_tensors()
1426
Jeremy Johnson015c3552022-02-23 12:15:03 +00001427 assert len(input_details) == len(
1428 placeholder_vals
1429 ), "number of placeholder mismatch"
1430
1431 for idx, val in enumerate(placeholder_vals):
1432 interpreter.set_tensor(input_details[idx]["index"], val.numpy())
1433
1434 interpreter.invoke()
1435 tflite_result = interpreter.get_tensor(output_details[0]["index"])
1436
1437 np.save(
1438 os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
1439 )
1440
            # The result tensor name changes after converting to a TFLite
            # flatbuffer, so overwrite it with the name from the TFLite model.
            # Assume a single result tensor for now.
1444 tflite_result_name = output_details[0]["name"]
1445
Eric Kunze97b00272023-07-20 10:52:56 -07001446 _, test_name = os.path.split(test_dir)
1447
Jeremy Johnson015c3552022-02-23 12:15:03 +00001448 # Write out test descriptor
1449 write_test_json(
1450 filename=os.path.join(test_dir, "test.json"),
1451 tf_model_filename=tf_model_filename,
1452 tf_result_npy_filename=tf_result_npy_filename,
1453 tf_result_name=tf_result_name,
1454 tflite_model_filename=tflite_model_filename,
1455 tflite_result_npy_filename=tflite_result_npy_filename,
1456 tflite_result_name=tflite_result_name,
1457 ifm_name=placeholder_names,
1458 ifm_file=placeholder_npy_filenames,
1459 ifm_shape=placeholder_shapes,
TatWai Chong6a46b252024-01-12 13:13:22 -08001460 ifm_dynamic=placeholder_dynamic,
Jeremy Johnson015c3552022-02-23 12:15:03 +00001461 framework_exclusions=excluded_framework_list,
1462 quantized=is_quantized,
Eric Kunze97b00272023-07-20 10:52:56 -07001463 test_name=test_name,
Jeremy Johnson015c3552022-02-23 12:15:03 +00001464 )
1465 except Exception as e:
1466 msg = "Error running task: {}".format(e)
1467 print(msg)
1468 print(
1469 "".join(
1470 traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
1471 )
1472 )
1473 return False
1474 return True
1475
1476
1477def build_const_net(
1478 args,
1479 curr_shape,
1480 op_name,
1481 dtype,
1482 excluded_framework_list,
1483 quantized_inference_dtype,
1484 result_name,
1485 seed,
1486 rng,
1487 filter,
1488 unit_test_args,
1489):
1490
1491 if quantized_inference_dtype:
1492 quant_dtype = get_tf_dtype(quantized_inference_dtype)
1493 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
1494 else:
1495 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
1496 test_dir = os.path.join(args.output_dir, test_dir)
1497
1498 # If the operator has an additional function to generate arguments, call it
1499 # here and iterate through the argument list that it generates
1500 op = TF_OP_LIST[op_name]
1501 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1502
TatWai Chongfd629052022-07-25 04:01:58 +00001503 try:
1504 rank_lo, rank_hi = op["rank"]
1505 except KeyError:
        # Default the testing rank range to (1, 4).
1507 rank_lo = 1
1508 rank_hi = 4
1509
1510 if len(curr_shape) not in range(rank_lo, rank_hi + 1):
1511 return
1512
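    # e.g. with the default rank range (1, 4), the 5-D entries in shape_list
    # are skipped for ops that don't declare a wider 'rank'.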
Jeremy Johnson015c3552022-02-23 12:15:03 +00001513 addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
1514 for desc, addl_args in addl_args_tuple:
Jeremy Johnson0e6218e2022-05-05 17:08:04 +01001515 # Only filter on the full test_name, not the output directory
1516 _, test_name = os.path.split(test_dir + desc)
1517 if not filter or filter.search(test_name):
Jeremy Johnson015c3552022-02-23 12:15:03 +00001518 unit_test_args.append(
1519 [
1520 op_name,
1521 args,
1522 test_dir + desc,
1523 curr_shape,
1524 addl_args,
1525 dtype,
1526 excluded_framework_list,
1527 quantized_inference_dtype,
1528 result_name,
1529 seed,
1530 ]
1531 )
1532
1533
# Python's built-in hash() is not reproducible across runs, so create our own
# deterministic hash for seeding purposes.
1535def op_name_hash(op_name):
1536 result = 0xDEADBEEF
1537 for ch in op_name:
1538 if result & 1:
1539 result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
1540 else:
1541 result = (ord(ch) << 24) ^ (result >> 1)
1542
1543 return result
1544
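# The result is combined with args.random_seed and reduced modulo
# np.iinfo(np.int32).max in generate_op_tests() to seed the RNG.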
1545
1546def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):
1547
1548 if not args.quiet:
1549 print(
1550 "Generating tests for {} ".format(
1551 op_name
1552 )
1553 )
1554
1555 op = TF_OP_LIST[op_name]
1556
    # Seed the RNG so that we get the same random tests on every run.
1558 # If the number of tests for a given generation function changes, the tests
1559 # for that operator may also change accordingly, but this will at least keep
1560 # down churn across operators.
1561
1562 bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
1563 np.int32
1564 ).max
1565 rng = np.random.default_rng(bounded_hash_val)
1566
    # op["types"] is either a dictionary with 'tf' and 'tflite' as keys and the
    # data types to test under each framework as values, or a plain list of types.
1569
1570 if isinstance(op["types"], dict):
1571 try:
1572 tf_dtypes = op["types"]["tf"]
1573 except KeyError:
1574 tf_dtypes = []
1575 try:
1576 tflite_dtypes = op["types"]["tflite"]
1577 except KeyError:
1578 tflite_dtypes = []
1579 elif isinstance(op["types"], list):
1580 tf_dtypes = op["types"]
1581 tflite_dtypes = op["types"]
1582
1583 tf_nonquantized_dtypes = tf_dtypes # tf doesn't support quantized data types
1584 tflite_quantized_dtypes = []
1585 tflite_nonquantized_dtypes = []
1586 for dtype in tflite_dtypes:
1587 if isinstance(dtype, QuantType):
1588 tflite_quantized_dtypes.append(dtype)
1589 else:
1590 tflite_nonquantized_dtypes.append(dtype)
1591
1592 nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
1593 set(tflite_nonquantized_dtypes)
1594 )
1595 nonquantized_dtypes = list(nonquantized_dtypes_set)
1596 quantized_dtypes = tflite_quantized_dtypes
1597
Jerry Ge5dd5a552023-05-23 22:41:20 +00001598 # append custom_shapes or replace shape_list with custom_shapes
1599 try:
1600 custom_shapes = op["custom_shapes"]
1601 if custom_shapes["custom_shape_only"]:
1602 shape_list = custom_shapes["shape_list"]
1603 else:
Jerry Geabdac232023-06-12 16:27:16 +00001604 shape_list = shape_list.copy()
Won Jeonf9c0cee2023-09-18 16:32:45 -07001605 shape_list.extend(custom_shapes["shape_list"])
Jerry Ge5dd5a552023-05-23 22:41:20 +00001606 except KeyError:
1607 pass
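    # e.g. "resize" appends (3, 1, 1, 7) to the default shape_list, while
    # "dynamic_linear" replaces it entirely with [(14, 19)].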
1608
Jeremy Johnson015c3552022-02-23 12:15:03 +00001609 # populate non quantized unit test arguments
1610 for dtype in nonquantized_dtypes:
1611
1612 excluded_framework_set = set(ALL_FRAMEWORKS)
1613 if dtype in tf_nonquantized_dtypes:
1614 excluded_framework_set.remove("tf")
1615 if dtype in tflite_nonquantized_dtypes:
1616 excluded_framework_set.remove("tflite")
1617 excluded_framework_list = list(excluded_framework_set)
1618
1619 for curr_shape in shape_list:
1620 build_const_net(
1621 args,
1622 curr_shape,
1623 op_name,
1624 dtype,
1625 excluded_framework_list,
1626 None,
1627 result_name,
1628 bounded_hash_val,
1629 rng,
1630 filter,
1631 unit_test_args,
1632 )
1633
1634 # populate quantized unit test arguments
1635 # must exclude 'tf' and source dtype being tf.float32
1636 for dtype in quantized_dtypes:
1637 for curr_shape in shape_list:
1638 build_const_net(
1639 args,
1640 curr_shape,
1641 op_name,
1642 tf.float32,
1643 ["tf"],
1644 dtype,
1645 result_name,
1646 bounded_hash_val,
1647 rng,
1648 filter,
1649 unit_test_args,
1650 )
1651
1652 return unit_test_args
1653
1654
1655def createDynamicOpLists():
1656 """The templated operators are conv2d-style operators with a number of kernel
1657 sizes. Since the operator is unchanged, we generate the range of kernel
1658 sizes here in this loop and remove the original templates from the list.
1659
1660 This could be expanded to non-conv2d-style operators in the future."""
1661
1662 # Dynamically create op lists for convolutions with a list of kernel sizes
1663 KERNELS = [
1664 [1, 1],
1665 [3, 3],
1666 [5, 5],
1667 ]
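    # e.g. "conv2d_TEMPLATE" expands to "conv2d_1x1", "conv2d_3x3" and
    # "conv2d_5x5" entries, each with the matching "filter" size.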
1668
TatWai Chongfd629052022-07-25 04:01:58 +00001669 # dim = [D, H, W]
1670 KERNELS_3D = [
1671 [1, 1, 1],
1672 [2, 3, 3],
1673 [3, 5, 5],
1674 ]
1675
Jeremy Johnson015c3552022-02-23 12:15:03 +00001676 TEMPLATE_LIST = [
1677 "conv2d",
1678 "conv2d_bias",
1679 "conv2d_relu",
1680 "conv2d_relu6",
1681 "conv2d_relu_n1_to_1",
1682 "conv2d_tanh",
1683 "depthwise_conv2d",
1684 "depthwise_conv2d_bias",
1685 "transpose_conv2d",
1686 ]
1687
TatWai Chongfd629052022-07-25 04:01:58 +00001688 TEMPLATE_LIST_CONV3D = [
1689 "conv3d",
1690 "conv3d_bias",
1691 ]
1692
Jeremy Johnson015c3552022-02-23 12:15:03 +00001693 for t in TEMPLATE_LIST:
1694 for k in KERNELS:
1695 testName = "{}_{}x{}".format(t, k[0], k[1])
1696 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1697 TF_OP_LIST[testName]["filter"] = k
1698 TF_OP_LIST[testName]["template"] = False
1699
    # The templates above only support 2-D kernels; the conv3d templates are
    # expanded separately with the 3-D kernel list.
1701 for t in TEMPLATE_LIST_CONV3D:
1702 for k in KERNELS_3D:
1703 testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
1704 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1705 TF_OP_LIST[testName]["filter"] = k
1706 TF_OP_LIST[testName]["template"] = False
1707
Jeremy Johnson015c3552022-02-23 12:15:03 +00001708 # Delete any templates after having created any dynamic ops
1709 # This is a two-pass operation because it's bad practice to delete
1710 # keys from dictionaries while iterating
1711 keyList = []
1712 for k in TF_OP_LIST:
1713 try:
1714 if TF_OP_LIST[k]["template"]:
1715 keyList.append(k)
1716 continue
1717 except KeyError:
1718 pass
1719
1720 for k in keyList:
1721 del TF_OP_LIST[k]
1722
1723
1724def main():
1725 parser = argparse.ArgumentParser()
1726 parser.add_argument(
1727 "--seed", dest="random_seed", default=42, type=int, help="Random seed"
1728 )
1729 parser.add_argument(
1730 "--random-shapes",
1731 dest="random_shapes",
1732 default=0,
1733 type=int,
1734 help=(
1735 "Use N random shapes of each rank for generating tests,"
1736 "seeded with random seed"
1737 ),
1738 )
1739 parser.add_argument(
1740 "-o",
1741 "--output-dir",
1742 dest="output_dir",
1743 default=".",
1744 type=str,
1745 help="Test output directory path prefix",
1746 )
1747 parser.add_argument(
1748 "-q",
1749 "--quiet",
1750 dest="quiet",
1751 default=False,
1752 action="store_true",
1753 help="Do not print test names",
1754 )
1755 parser.add_argument(
1756 "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
1757 )
1758 parser.add_argument(
1759 "-m",
1760 "--tflite-kernel-mode",
1761 dest="tflite_kernel_mode",
1762 type=str,
1763 choices=["reference", "optimized"],
1764 default="reference",
1765 help="TFLite interpreter kernel mode",
1766 )
1767 parser.add_argument(
1768 "--num-samples",
1769 dest="num_samples",
1770 default=200,
1771 type=int,
1772 help="Number of input samples for post-training quantization",
1773 )
1774 parser.add_argument(
1775 "--filter",
1776 dest="filter",
1777 default="",
1778 type=str,
1779 help="Filter test names by this expression",
1780 )
1781 args = parser.parse_args()
1782
1783 # Turn the filter into a re object if present
1784 filter = None
1785 if args.filter != "":
1786 filter = re.compile(args.filter)
1787
1788 # Autodetect CPU count
1789 if args.jobs <= 0:
1790 args.jobs = os.cpu_count()
1791
1792 # Disable TF info messages
1793 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
1794
1795 try:
1796 os.makedirs(args.output_dir)
1797 except FileExistsError:
1798 pass
1799
1800 if args.random_shapes:
1801 gen_rand_shapes(args)
1802
1803 # Build dynamic ops
1804 createDynamicOpLists()
1805
1806 # Generate the test list and arguments to run_unit_test()
1807 unit_test_args = []
1808
1809 for op in TF_OP_LIST:
1810 generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)
1811
1812 errors = 0
1813 for t in unit_test_args:
1814 if not run_unit_test(*t):
1815 errors = errors + 1
1816
1817 if not args.quiet:
1818 print("\nAll tasks done - with {} errors".format(errors))
1819
1820 return 1 if errors else 0
1821
1822
1823if __name__ == "__main__":
1824 exit(main())