Jeremy Johnson015c3552022-02-23 12:15:03 +00001#!/usr/bin/env python3
Jerry Ge54bb61e2023-12-20 22:21:24 +00002# Copyright (c) 2020-2024, ARM Limited.
Jeremy Johnson015c3552022-02-23 12:15:03 +00003# SPDX-License-Identifier: Apache-2.0
4import argparse
5import os
6import re
7import traceback
8
9import numpy as np
10
11# Level  | Level for Humans | Level Description
12# -------|------------------|------------------------------------
13# 0      | DEBUG            | [Default] Print all messages
14# 1      | INFO             | Filter out INFO messages
15# 2      | WARNING          | Filter out INFO & WARNING messages
16# 3      | ERROR            | Filter out all messages
17# Filter out TensorFlow debug messages except errors
18os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
19
20# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
21import tensorflow as tf # noqa: E402
22from frameworks.write_test_json import write_test_json # noqa: E402
23from frameworks.arg_gen import ArgGen # noqa: E402
24from frameworks.tensor_gen import TGen # noqa: E402
25from frameworks.test_builder import TBuilder # noqa: E402
Jeremy Johnson5d1a3472022-03-31 09:50:06 +010026from frameworks.test_gen_utils import ( # noqa: E402
Jeremy Johnson015c3552022-02-23 12:15:03 +000027 QuantType,
28 get_tf_dtype,
29 get_shape_str,
30) # noqa: E402
Tai Lycf84bc92023-09-07 20:49:09 +000031
Jeremy Johnson015c3552022-02-23 12:15:03 +000032from tensorflow.lite.python.interpreter import OpResolverType # noqa: E402
33
34# All of the supported frameworks
35ALL_FRAMEWORKS = ["tf", "tflite"]
36
37# Lists of different data types
38TYPE_F = [tf.float32]
39TYPE_I = [tf.int32]
40TYPE_FI = [tf.float32, tf.int32]
41TYPE_B = [tf.bool]
42TYPE_FIB = [tf.float32, tf.int32, tf.bool]
43TYPE_H = [tf.float16]
44TYPE_FH = [tf.float32, tf.float16]
45TYPE_FHI = [tf.float32, tf.float16, tf.int32]
46TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]
47
48# The list of operator tests
49# Each dictionary entry for an op is a dictionary with the following required members:
50# 'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
51 # 'build_fcn': tuple (Test builder function, Tensor generator function,
52# Argument generator function)
53# 'types': list of Tensorflow types that should be tested for this op
54# OR
55# a dictionary of {'framework_name': [type_list] } for cases where only
56# a subset of the types should be tested in each framework. This can also
57# be used to restrict an operator to a particular framework.
58#
59# And optional members:
Jerry Ge5dd5a552023-05-23 22:41:20 +000060# 'template': boolean (indicates that this is a templated op which gets further
61# processing in createDynamicOpLists)
62# 'bias': boolean indicating that there is a bias component to be generated
63# 'qtypes': List of QuantType quantized types to generate for this op
64# 'rank': tuple (lowest rank, highest rank). Dimension range of input tensor.
65# 'custom_shapes': List of custom shapes for specific operators
Jeremy Johnson015c3552022-02-23 12:15:03 +000066
67TF_OP_LIST = {
68 "add": {
69 "operands": (2, 0),
70 "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
71 "types": {
72 "tf": TYPE_FI,
73 "tflite": list(
74 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
75 ),
76 },
77 },
78 "sub": {
79 "operands": (2, 0),
80 "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
81 "types": {
82 "tf": TYPE_FI,
83 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
84 # QuantType.ALL_I16 fails in TFLite conversion
85 },
86 },
87 "mul": {
88 "operands": (2, 0),
89 "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
90 "types": {
91 "tf": TYPE_FI,
92 "tflite": list(
93 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
94 ),
95 },
96 },
97 "exp": {
98 "operands": (1, 0),
99 "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
100 "types": TYPE_F,
101 },
102 "rcp": {
103 "operands": (1, 0),
104 "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
105 "types": TYPE_F,
106 },
107 "relu": {
108 "operands": (1, 0),
109 "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
110 "types": {
111 "tf": TYPE_F,
112 "tflite": list(
113 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
114 ),
115 },
116 },
Jerry Ge93912432022-07-22 10:29:13 -0700117 "relu1": {
118 "operands": (1, 0),
119 "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
120 "types": {
121 "tf": [],
122 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
123 },
124 },
Jerry Ge2eea5bf2022-10-11 16:27:05 +0000125 "relu0To1": {
126 "operands": (1, 0),
127 "build_fcn": (TBuilder.Relu0To1, TGen.tgBasic, ArgGen.agNone),
128 "types": {
129 "tf": [],
130 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
131 },
132 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000133 "relu6": {
134 "operands": (1, 0),
135 "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
136 "types": {
137 "tf": TYPE_F,
138 "tflite": list(
139 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
140 ),
141 },
142 },
143 "leaky_relu": {
144 "operands": (1, 0),
145 "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
146 "types": {
147 "tf": TYPE_F,
148 "tflite": list(
149 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
150 ),
151 },
152 },
TatWai Chong41a04fe2022-11-03 21:44:32 +0000153 "prelu": {
154 "operands": (1, 0),
155 "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
156 "types": {
157 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
158 },
159 },
TatWai Chong473eb382022-08-02 04:21:30 +0000160 "gelu": {
161 "operands": (1, 0),
162 "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
163 "types": {
164 # Need compiler support for tf.Erf.
165 # "tf": TYPE_F,
166 "tflite": list(
167 # Only float32, int8 and uint8 supported currently
168 TYPE_F
169 + [QuantType.ALL_U8, QuantType.ALL_I8]
170 ),
171 },
172 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000173 "concat": {
174 "operands": (2, 0),
175 "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
176 "types": TYPE_FI,
Won Jeonf9c0cee2023-09-18 16:32:45 -0700177 "rank": (0, 4),
178 "custom_shapes": {
179 "custom_shape_only": False,
180 "shape_list": [()],
181 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000182 },
183 "bitwise_and": {
184 "operands": (2, 0),
185 "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
186 "types": {"tf": TYPE_I}, # Not supported in TF Lite
187 },
188 "bitwise_or": {
189 "operands": (2, 0),
190 "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
191 "types": {"tf": TYPE_I}, # Not supported in TF Lite
192 },
193 "bitwise_not": {
194 "operands": (1, 0),
195 "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
196 "types": {"tf": TYPE_I}, # Not supported in TF Lite
197 },
198 "bitwise_xor": {
199 "operands": (2, 0),
200 "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
201 "types": {"tf": TYPE_I}, # Not supported in TF Lite
202 },
203 "logical_and": {
204 "operands": (2, 0),
205 "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
206 "types": TYPE_B,
207 },
208 "logical_or": {
209 "operands": (2, 0),
210 "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
211 "types": TYPE_B,
212 },
213 "logical_not": {
214 "operands": (1, 0),
215 "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
216 "types": TYPE_B,
217 },
218 "reduce_any": {
219 "operands": (1, 0),
220 "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
221 "types": TYPE_B,
222 },
223 "reduce_all": {
224 "operands": (1, 0),
225 "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
TatWai Chong6ad6d6e2022-11-11 15:28:50 -0800226 "types": TYPE_B,
Jeremy Johnson015c3552022-02-23 12:15:03 +0000227 },
228 "reduce_min": {
229 "operands": (1, 0),
230 "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
231 "types": {
232 "tf": TYPE_FI,
233 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
234 },
235 },
236 "reduce_max": {
237 "operands": (1, 0),
238 "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
239 "types": {
240 "tf": TYPE_FI,
241 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
242 },
243 },
244 "reduce_sum": {
245 "operands": (1, 0),
246 "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
247 "types": {
248 "tf": TYPE_F,
249 # v2 converter doesn't recognize quantized reduce_sum
250 # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
251 "tflite": TYPE_F,
252 },
253 },
254 "reduce_mean": {
255 "operands": (1, 0),
256 "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
257 "types": {
258 "tf": TYPE_F,
259 "tflite": list(
260 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
261 ),
262 },
263 },
264 "reduce_product": {
265 "operands": (1, 0),
266 "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
267 "types": TYPE_F,
268 },
269 "min": {
270 "operands": (2, 0),
271 "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
272 "types": TYPE_FI,
273 },
274 "max": {
275 "operands": (2, 0),
276 "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
277 "types": TYPE_FI,
278 },
279 "pow": {
280 "operands": (2, 0),
281 "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
282 # Technically, integer is supported, but only for positive exponents.
283 # Needs a random argument generator.
284 "types": TYPE_F,
285 },
286 "abs": {
287 "operands": (1, 0),
288 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
289 "types": TYPE_F,
290 },
291 "ceil": {
292 "operands": (1, 0),
293 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
294 "types": TYPE_F,
295 },
296 "floor": {
297 "operands": (1, 0),
298 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
299 "types": TYPE_F,
300 },
301 "log": {
302 "operands": (1, 0),
303 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
304 "types": TYPE_F,
305 },
306 "negate": {
307 "operands": (1, 0),
308 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
309 "types": TYPE_F,
310 },
311 "rsqrt": {
312 "operands": (1, 0),
Jerry Geb1f25012023-03-03 11:33:51 -0800313 "build_fcn": (TBuilder.Rsqrt, TGen.tgBasicPositive, ArgGen.agNone),
314 "types": {
315 "tf": TYPE_F,
316 "tflite": list(TYPE_F + [QuantType.ALL_I8]),
317 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000318 },
TatWai Chongd713a4d2022-11-10 13:54:28 -0800319 "sign": {
320 "operands": (1, 0),
321 "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
322 "types": {
323 "tf": TYPE_F,
324 },
325 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000326 "sigmoid": {
327 "operands": (1, 0),
328 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
329 "types": {
330 "tf": TYPE_F,
331 "tflite": list(
332 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
333 ),
334 },
335 },
336 "tanh": {
337 "operands": (1, 0),
338 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
339 "types": {
340 "tf": TYPE_F,
341 "tflite": list(
342 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
343 ),
344 },
345 },
Won Jeon78155c62023-06-10 00:20:04 +0000346 "erf": {
347 "operands": (1, 0),
348 "build_fcn": (TBuilder.Erf, TGen.tgBasic, ArgGen.agNone),
349 "types": {
350 "tf": TYPE_F,
351 },
352 },
Luke Hutton41601862022-12-06 17:29:15 +0000353 "sin": {
354 "operands": (1, 0),
355 "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
Luke Hutton657af862022-12-16 16:30:45 +0000356 "types": TYPE_F,
Luke Hutton41601862022-12-06 17:29:15 +0000357 },
358 "cos": {
359 "operands": (1, 0),
360 "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
Luke Hutton657af862022-12-16 16:30:45 +0000361 "types": TYPE_F,
Luke Hutton41601862022-12-06 17:29:15 +0000362 },
Luke Hutton2138a192022-12-15 11:01:39 +0000363 "atan2": {
364 "operands": (2, 0),
365 "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
366 "types": {
367 "tflite": TYPE_F,
368 },
369 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000370 "square": {
371 "operands": (1, 0),
372 "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
373 "types": TYPE_F,
374 },
375 "squared_difference": {
376 "operands": (2, 0),
377 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
Won Jeondd14c1b2023-06-29 23:20:00 +0000378 "types": {
379 "tf": TYPE_F,
380 "tflite": list(TYPE_FI + [QuantType.ALL_I8]),
381 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000382 },
383 "equal": {
384 "operands": (2, 0),
385 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
386 "types": TYPE_FI,
387 },
388 "greater_equal": {
389 "operands": (2, 0),
390 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
391 "types": TYPE_FI,
392 },
393 "greater": {
394 "operands": (2, 0),
395 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
396 "types": TYPE_FI,
397 },
398 "less": {
399 "operands": (2, 0),
400 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
401 "types": TYPE_FI,
402 },
403 "less_equal": {
404 "operands": (2, 0),
405 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
406 "types": TYPE_FI,
407 },
408 "conv2d_TEMPLATE": {
409 "operands": (1, 1),
410 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
411 "types": {
412 "tf": [tf.float32],
413 "tflite": [
414 tf.float32,
415 QuantType.CONV_U8_U8,
416 QuantType.CONV_I8_I8,
417 QuantType.CONV_I16_I8,
418 ],
419 },
420 "template": True,
421 },
422 "conv2d_relu_TEMPLATE": {
423 "operands": (1, 2),
424 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
425 "types": {
426 "tf": [tf.float32],
427 "tflite": [
428 tf.float32,
429 QuantType.CONV_U8_U8,
430 QuantType.CONV_I8_I8,
431 QuantType.CONV_I16_I8,
432 ],
433 },
434 "template": True,
435 },
436 "conv2d_relu6_TEMPLATE": {
437 "operands": (1, 2),
438 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
439 "types": {
440 "tf": [tf.float32],
441 "tflite": [
442 tf.float32,
443 QuantType.CONV_U8_U8,
444 QuantType.CONV_I8_I8,
445 QuantType.CONV_I16_I8,
446 ],
447 },
448 "template": True,
449 },
450 "conv2d_relu_n1_to_1_TEMPLATE": {
451 "operands": (1, 2),
452 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
453 "types": {
454 "tf": [tf.float32],
455 "tflite": [
456 tf.float32,
457 QuantType.CONV_U8_U8,
458 QuantType.CONV_I8_I8,
459 QuantType.CONV_I16_I8,
460 ],
461 },
462 "template": True,
463 },
464 # This test is converted as:
465 # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
466 # TODO: anyway to generate tfl.conv2d(){fused_activation_function="TANH"}?
467 "conv2d_tanh_TEMPLATE": {
468 "operands": (1, 2),
469 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
470 "types": {
471 "tf": [tf.float32],
472 "tflite": [
473 tf.float32,
474 QuantType.CONV_U8_U8,
475 QuantType.CONV_I8_I8,
476 QuantType.CONV_I16_I8,
477 ],
478 },
479 "template": True,
480 },
481 "conv2d_bias_TEMPLATE": {
482 "operands": (1, 2),
483 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
484 "types": {
485 "tf": [tf.float32],
486 "tflite": [
487 tf.float32,
488 QuantType.CONV_U8_U8,
489 QuantType.CONV_I8_I8,
490 QuantType.CONV_I16_I8,
491 ],
492 },
493 "bias": True,
494 "template": True,
495 },
TatWai Chongfd629052022-07-25 04:01:58 +0000496 "conv3d_TEMPLATE": {
497 "operands": (1, 1),
498 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
499 "types": {
500 "tf": [tf.float32],
501 "tflite": [
502 tf.float32,
503 QuantType.CONV_U8_U8,
504 QuantType.CONV_I8_I8,
505 # Quantization to 16x8-bit not yet supported by tflite.
506 ],
507 },
508 "template": True,
509 "rank": (1, 5),
510 },
511 "conv3d_bias_TEMPLATE": {
512 "operands": (1, 2),
513 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
514 "types": {
515 "tf": [tf.float32],
516 "tflite": [
517 tf.float32,
518 QuantType.CONV_U8_U8,
519 QuantType.CONV_I8_I8,
520 # Quantization to 16x8-bit not yet supported by tflite.
521 ],
522 },
523 "bias": True,
524 "template": True,
525 "rank": (1, 5),
526 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000527 "depthwise_conv2d_TEMPLATE": {
528 "operands": (1, 1),
529 "build_fcn": (
530 TBuilder.DepthwiseConv2d,
531 TGen.tgDepthwiseConv2d,
532 ArgGen.agDepthwiseConv2d,
533 ),
534 "types": {
535 "tf": [tf.float32],
536 "tflite": [
537 tf.float32,
538 QuantType.CONV_U8_U8,
539 QuantType.CONV_I8_I8,
540 QuantType.CONV_I16_I8,
541 ],
542 },
543 "template": True,
544 },
545 "depthwise_conv2d_bias_TEMPLATE": {
546 "operands": (1, 2),
547 "build_fcn": (
548 TBuilder.DepthwiseConv2dWithBias,
549 TGen.tgDepthwiseConv2d,
550 ArgGen.agDepthwiseConv2d,
551 ),
552 "types": {
553 "tf": [tf.float32],
554 "tflite": [
555 tf.float32,
556 QuantType.CONV_U8_U8,
557 QuantType.CONV_I8_I8,
558 QuantType.CONV_I16_I8,
559 ],
560 },
561 "bias": True,
562 "template": True,
563 },
564 "transpose_conv2d_TEMPLATE": {
565 "operands": (1, 1),
566 "build_fcn": (
567 TBuilder.TransposeConv2d,
568 TGen.tgTransposeConv2d,
569 ArgGen.agTransposeConv2d,
570 ),
571 "types": {
572 "tf": [tf.float32],
573 "tflite": [
574 tf.float32,
575 QuantType.CONV_U8_U8,
576 QuantType.CONV_I8_I8,
577 QuantType.CONV_I16_I8,
578 ],
579 },
580 "template": True,
581 },
582 "argmax": {
583 "operands": (1, 0),
584 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
585 "types": {"tf": TYPE_F},
586 },
587 "avg_pool2d": {
588 "operands": (1, 0),
589 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
590 "types": {
591 "tf": TYPE_F,
592 "tflite": list(
593 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
594 ),
595 },
596 },
597 "max_pool2d": {
598 "operands": (1, 0),
599 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
600 "types": {
601 "tf": TYPE_F,
602 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
603 # ALL_I16 not supported yet
604 # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
605 # QI16 is missing from MaxPoolOperandAndResultConstraints
606 # If QI16 is added back, this test can run through.
607 },
608 },
609 "reshape": {
610 "operands": (1, 0),
611 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
612 "types": TYPE_FI,
613 },
614 "transpose": {
615 "operands": (1, 0),
616 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
617 "types": TYPE_FI,
618 },
619 "slice": {
620 "operands": (1, 0),
621 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
622 "types": TYPE_FI,
623 },
624 "strided_slice": {
625 "operands": (1, 0),
626 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
627 "types": TYPE_FI,
628 },
629 "select": {
630 "operands": (3, 0),
631 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
632 "types": TYPE_FI,
633 },
634 "addn": {
635 "operands": (4, 0),
636 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
637 "types": TYPE_FI,
638 },
639 "concatv2": {
640 "operands": (4, 0),
641 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
642 "types": TYPE_FI,
Won Jeonf9c0cee2023-09-18 16:32:45 -0700643 "rank": (0, 4),
644 "custom_shapes": {
645 "custom_shape_only": False,
646 "shape_list": [()],
647 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000648 },
649 "stack": {
650 "operands": (4, 0),
651 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
652 "types": TYPE_FI,
653 },
654 "unstack": {
655 "operands": (1, 0),
656 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
657 "types": TYPE_F,
658 },
TatWai Chongf7008da2022-09-09 09:35:40 +0000659 "mirrorpad": {
660 "operands": (1, 0),
661 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
662 "types": TYPE_FI,
663 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000664 "pad": {
665 "operands": (1, 0),
666 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
TatWai Chong2226f902023-02-22 18:38:01 -0800667 "types": {
668 "tf": TYPE_F,
669 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
670 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000671 },
672 "expand_dims": {
673 "operands": (1, 0),
674 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
675 "types": TYPE_FI,
676 },
677 "shape": {
678 "operands": (1, 0),
679 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
680 "types": TYPE_FI,
681 },
682 "rank": {
683 "operands": (1, 0),
684 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
685 "types": TYPE_FI,
686 },
687 "fill": {
688 "operands": (1, 0),
689 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
690 "types": TYPE_FI,
691 },
692 "elu": {
693 "operands": (1, 0),
694 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
695 "types": TYPE_F,
696 },
697 "softmax": {
698 "operands": (1, 0),
699 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
700 "types": {
701 "tf": TYPE_F,
702 "tflite": list(
703 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
704 ),
705 },
706 },
707 "log_softmax": {
708 "operands": (1, 0),
709 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
710 "types": TYPE_F,
711 },
Jerry Ge28811d92023-12-05 00:53:26 +0000712 "dynamic_linear": {
713 "operands": (1, 0),
714 "build_fcn": (TBuilder.DynamicLinear, TGen.tgBasic, ArgGen.agNone),
715 "types": {
716 "tf": [],
717 "tflite": list(TYPE_F),
718 },
719 "custom_shapes": {
720 "custom_shape_only": True,
721 "shape_list": [(14, 19)],
722 },
723 # List of tuples, one per input operand, specifying which dims to set to None
724 # In this case, we have 1 input, so we have 1 tuple
725 # We're setting the first input's first dim to None
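# e.g. with the (14, 19) custom shape above, the first dim is set to None and
# the TFLite input signature becomes (None, 19)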
726 "dynamic_shape_dim": [
727 (0,),
728 ],
729 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000730 "matmul": {
731 "operands": (2, 0),
732 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
733 "types": {
734 "tf": TYPE_F,
735 "tflite": list(
736 TYPE_F
737 + [QuantType.ALL_U8, QuantType.ALL_I8]
738 # 16 bits matmul fail to convert
739 ),
740 },
741 },
742 "add_scalar": {
743 "operands": (1, 0),
744 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
745 "types": TYPE_F,
746 },
747 "add_1d": {
748 "operands": (2, 0),
749 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
750 "types": TYPE_F,
751 },
752 "split": {
753 "operands": (1, 0),
754 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
755 "types": TYPE_FI,
756 },
757 "tile": {
758 "operands": (1, 0),
759 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
760 "types": TYPE_FI,
761 },
762 "reverse": {
763 "operands": (1, 0),
764 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
765 "types": {"tf": TYPE_FI},
766 },
767 "gather": {
768 "operands": (1, 0),
769 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
770 "types": TYPE_FI,
771 },
772 "gather_nd": {
773 "operands": (1, 0),
774 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
775 "types": TYPE_FI,
776 },
777 "scatter_nd": {
778 "operands": (1, 0),
779 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
780 "types": TYPE_FI,
781 },
782 "space_to_batch": {
783 "operands": (1, 0),
784 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
785 "types": TYPE_F,
786 },
TatWai Chongbef907a2024-01-23 09:40:37 -0800787 "dynamic_space_to_batch": {
788 "operands": (1, 0),
789 "build_fcn": (
790 TBuilder.DynamicSpaceToBatch,
791 TGen.tgBasic,
792 ArgGen.agSpaceToBatch,
793 ),
794 "types": TYPE_F,
795 "custom_shapes": {
796 "custom_shape_only": True,
797 "shape_list": [(13, 21, 3)],
798 },
799 "dynamic_shape_dim": [
800 (
801 0,
802 1,
803 ),
804 ],
805 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000806 "batch_to_space": {
807 "operands": (1, 0),
808 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
809 "types": TYPE_F,
810 },
Jerry Ge28811d92023-12-05 00:53:26 +0000811 "dynamic_batch_to_space": {
812 "operands": (1, 0),
813 "build_fcn": (
814 TBuilder.DynamicBatchToSpace,
815 TGen.tgBasic,
816 ArgGen.agBatchToSpace,
817 ),
818 "types": TYPE_F,
819 "custom_shapes": {
820 "custom_shape_only": True,
821 "shape_list": [(8, 4, 4, 4)],
822 },
823 # List of tuples, one per input operand, specifying which dims to set to None
824 # In this case, we have 1 input, so we have 1 tuple
825 # We're setting the first input's 0th dim to None
826 "dynamic_shape_dim": [
827 (0,),
828 ],
829 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000830 "space_to_depth": {
831 "operands": (1, 0),
832 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
833 "types": TYPE_F,
834 },
Jerry Ge28811d92023-12-05 00:53:26 +0000835 "dynamic_space_to_depth": {
836 "operands": (1, 0),
837 "build_fcn": (TBuilder.DynamicSpaceToDepth, TGen.tgBasic, ArgGen.agNone),
838 "types": {
839 "tf": [],
840 "tflite": list(TYPE_F),
841 },
842 "custom_shapes": {
843 "custom_shape_only": True,
844 "shape_list": [(1, 32, 32, 8)],
845 },
846 # List of tuples, one per input operand, specifying which dims to set to None
847 # In this case, we have 1 input, so we have 1 tuple
TatWai Chong6a46b252024-01-12 13:13:22 -0800848 # We're setting the first input's first (batch) dim to None
Jerry Ge28811d92023-12-05 00:53:26 +0000849 "dynamic_shape_dim": [
TatWai Chong6a46b252024-01-12 13:13:22 -0800850 (0,),
Jerry Ge28811d92023-12-05 00:53:26 +0000851 ],
852 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000853 "depth_to_space": {
854 "operands": (1, 0),
855 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
856 "types": TYPE_F,
857 },
Jerry Ge28811d92023-12-05 00:53:26 +0000858 "dynamic_depth_to_space": {
859 "operands": (1, 0),
860 "build_fcn": (TBuilder.DynamicDepthToSpace, TGen.tgBasic, ArgGen.agNone),
861 "types": {
862 "tf": [],
863 "tflite": list(TYPE_F),
864 },
865 "custom_shapes": {
866 "custom_shape_only": True,
867 "shape_list": [(1, 1, 1, 4)],
868 },
869 # List of tuples, one per input operand, specifying which dims to set to None
870 # In this case, we have 1 input, so we have 1 tuple
TatWai Chong6a46b252024-01-12 13:13:22 -0800871 # We're setting the first input's first (batch) dim to None
Jerry Ge28811d92023-12-05 00:53:26 +0000872 "dynamic_shape_dim": [
TatWai Chong6a46b252024-01-12 13:13:22 -0800873 (0,),
Jerry Ge28811d92023-12-05 00:53:26 +0000874 ],
875 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000876 "one_hot": {
877 "operands": (3, 1),
878 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
879 "types": TYPE_FI,
880 },
881 "fakequant": {
882 "operands": (1, 0),
883 "build_fcn": (
884 TBuilder.Fakequant,
885 TGen.tgBasic,
886 ArgGen.agFakequant,
887 ),
888 "types": {"tf": TYPE_F},
889 },
TatWai Chong0cef07e2023-02-27 13:22:52 -0800890 "resize": {
Jeremy Johnson015c3552022-02-23 12:15:03 +0000891 "operands": (1, 0),
TatWai Chong0cef07e2023-02-27 13:22:52 -0800892 "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
TatWai Chongf7326092022-06-08 12:17:14 -0700893 "types": {
894 "tf": TYPE_F,
895 "tflite": list(
896 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
897 ),
898 },
Jerry Ge5dd5a552023-05-23 22:41:20 +0000899 "custom_shapes": {
900 "custom_shape_only": False,
901 "shape_list": [(3, 1, 1, 7)],
902 },
TatWai Chongf7326092022-06-08 12:17:14 -0700903 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000904 "left_shift": {
905 "operands": (1, 0),
906 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
907 "types": {"tf": [tf.int32]},
908 },
909 "right_shift": {
910 "operands": (1, 0),
911 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
912 "types": {
913 "tf": [
914 tf.int32,
915 ]
916 },
917 },
Jerry Ge9e94af82022-10-27 09:57:00 -0700918 "while": {
919 "operands": (1, 0),
920 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
921 "types": {
922 "tflite": list(TYPE_F),
923 },
924 },
925 "lstm": {
926 "operands": (1, 0),
927 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
928 "types": {
929 "tflite": [
930 tf.float32,
931 # tf.int32
932 ]
933 },
934 },
Tai Lycf84bc92023-09-07 20:49:09 +0000935 "lstm_stateful": {
936 "operands": (1, 0),
937 "build_fcn": (TBuilder.SLSTM, TGen.tgRecurrent, ArgGen.agNone),
938 "types": {
939 "tflite": [
940 tf.float32,
941 ]
942 },
Jerry Ged5b15122024-03-26 20:51:48 +0000943 "num_variables": 2,
Tai Lycf84bc92023-09-07 20:49:09 +0000944 },
Jerry Ge9e94af82022-10-27 09:57:00 -0700945 "gru": {
946 "operands": (1, 0),
947 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
948 "types": {
949 "tflite": [
950 tf.float32,
951 # tf.int32
952 ]
953 },
954 },
955 "rnn": {
956 "operands": (1, 0),
957 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
958 "types": {
959 "tflite": [
960 tf.float32,
961 ]
962 },
963 },
Tai Lycf84bc92023-09-07 20:49:09 +0000964 "callonce": {
965 "operands": (1, 0),
966 "build_fcn": (TBuilder.CallOnce, TGen.tgBasic, ArgGen.agNone),
967 "types": {
968 "tflite": [tf.float32],
969 },
970 "custom_shapes": {
971 "custom_shape_only": True,
972 "shape_list": [(1,)],
973 },
974 },
Luke Hutton261b7b62023-01-10 14:50:31 +0000975 "rfft2d": {
976 "operands": (1, 0),
977 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
978 "types": {
979 "tflite": TYPE_F,
980 },
981 },
Luke Hutton714aa602023-02-08 19:45:26 +0000982 "real": {
983 "operands": (1, 0),
984 "build_fcn": (TBuilder.Real, TGen.tgComplexComponents, ArgGen.agNone),
985 "types": {
986 "tflite": [tf.complex64],
987 },
988 },
989 "imag": {
990 "operands": (1, 0),
991 "build_fcn": (TBuilder.Imag, TGen.tgComplexComponents, ArgGen.agNone),
992 "types": {
993 "tflite": [tf.complex64],
994 },
995 },
Tai Lyfe36fa92023-06-01 21:45:12 +0000996 "broadcastto": {
997 "operands": (1, 1),
998 "build_fcn": (TBuilder.BroadcastTo, TGen.tgBroadcastTo, ArgGen.agNone),
999 "types": {
1000 "tf": TYPE_FIB,
1001 },
1002 },
Jeremy Johnson015c3552022-02-23 12:15:03 +00001003}
1004
1005# Shapes to be tested; default can be overwritten
1006shape_list = [
1007 (1,),
1008 (64,),
1009 (14, 19),
1010 (13, 21, 3),
Luke Hutton261b7b62023-01-10 14:50:31 +00001011 (1, 8, 16),
Jeremy Johnson015c3552022-02-23 12:15:03 +00001012 (1, 4, 4, 4),
1013 (1, 8, 4, 17),
1014 (1, 4, 8, 19),
1015 (1, 32, 32, 8),
1016 (1, 7, 7, 9),
TatWai Chong0cef07e2023-02-27 13:22:52 -08001017 (3, 1, 1, 7),
TatWai Chongfd629052022-07-25 04:01:58 +00001018 (2, 2, 7, 7, 2),
1019 (1, 4, 8, 21, 17),
1020 (3, 32, 16, 16, 5),
Jeremy Johnson015c3552022-02-23 12:15:03 +00001021]
1022
1023
1024def gen_rand_shapes(args):
1025 """Overwrite the global shape list with a new list of random shapes"""
1026 global shape_list
1027
1028 rng = np.random.default_rng(args.random_seed)
1029
1030 # Don't let things get too big... cap the maximum volume, but let
1031 # an individual dimension be 1..47
1032 max_total_volume = 32 * 32 * 4
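# i.e. at most 32*32*4 = 4096 elements; oversized shapes are halved
# dimension-by-dimension below (or discarded if that squeezes a dim below 1)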
1033
1034 shape_list = []
TatWai Chongfd629052022-07-25 04:01:58 +00001035 # Only iterate over ranks 2, 3, 4, and 5
1036 for rank in range(2, 6):
Jeremy Johnson015c3552022-02-23 12:15:03 +00001037 for n in range(args.random_shapes):
1038 new_shape = rng.integers(1, 48, size=rank)
1039
TatWai Chongfd629052022-07-25 04:01:58 +00001040 # Set the batch dimension on 4D or 5D objects to 1
1041 if rank == 4 or rank == 5:
Jeremy Johnson015c3552022-02-23 12:15:03 +00001042 new_shape[0] = 1
1043
1044 # Limit the total shape volume and throw out any
1045 # shapes that wouldn't leave at least size=2 in some non-batch dimension
1046 volume = 1
1047 skip_shape = False
1048 for i in range(rank):
1049
1050 volume *= new_shape[i]
1051
1052 # Reduce the shape, while it's larger than the maximum volume
1053 while volume > max_total_volume:
1054 new_shape[i] = new_shape[i] // 2
1055 volume = volume // 2
1056
1057 # Now an untenable dimension size? Skip this one.
1058 if new_shape[i] < 1:
1059 skip_shape = True
1060
1061 if not skip_shape:
1062 shape_list.append(tuple(new_shape))
1063
1064
1065# Construct, run and save a whole tensorflow tf.function to a protobuf file
1066 # or convert to .tflite if it's a quantized unit test
1067def run_unit_test(
1068 op_name,
1069 args,
1070 test_dir,
1071 curr_shape,
1072 addl_args,
1073 dtype,
1074 excluded_framework_list,
1075 quantized_inference_dtype,
1076 result_name,
1077 seed,
1078):
1079
1080 try:
1081 op = TF_OP_LIST[op_name]
1082 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1083
1084 # Get and seed a random number generator for this test
1085 rng = np.random.default_rng(seed)
1086
1087 # return placeholders=(str: name, np.array: value)
1088 # consts=(str: name, np.array: value)
Won Jeone2325d12023-06-10 15:25:54 +00001089 placeholders, consts = (
Won Jeon6c93f412023-07-08 07:04:08 +00001090 tensor_gen_fcn(op, curr_shape, dtype, rng, False)
Won Jeone2325d12023-06-10 15:25:54 +00001091 if tensor_gen_fcn.__name__ == "tgBFuzz"
1092 else tensor_gen_fcn(op, curr_shape, dtype, rng)
1093 )
Jeremy Johnson015c3552022-02-23 12:15:03 +00001094
1095 # if the test doesn't have any placeholders/consts, terminate early
1096 if len(placeholders) == 0 and len(consts) == 0:
1097 return True
1098
1099 if not args.quiet:
1100 print(" {} ".format(test_dir))
1101
1102 try:
1103 os.mkdir(test_dir)
1104 except FileExistsError:
1105 pass
1106
1107 const_nodes = [value for name, value in consts]
1108
1109 num_placeholders = len(placeholders)
1110 # if the test is quantized, create tensor quantization metadata for
1111 # each input tensor, based on the quantized type
1112 if quantized_inference_dtype:
1113 is_quantized = True
1114 # TODO: support INT8 IFM x INT4 weight later
1115 if quantized_inference_dtype == QuantType.ALL_U8:
1116 qzero = [128] * num_placeholders
1117 numpy_dtype = [np.uint8] * num_placeholders
1118 tflite_inference_dtype = tf.uint8
1119 elif quantized_inference_dtype == QuantType.ALL_I8:
1120 qzero = [0] * num_placeholders
1121 numpy_dtype = [np.int8] * num_placeholders
1122 tflite_inference_dtype = tf.int8
1123 elif quantized_inference_dtype == QuantType.ALL_I16:
1124 qzero = [0] * num_placeholders
1125 numpy_dtype = [np.int16] * num_placeholders
1126 tflite_inference_dtype = tf.int16
1127 elif quantized_inference_dtype == QuantType.CONV_U8_U8:
1128 assert (
1129 num_placeholders == 1
1130 ), "Unsupported number of placeholders for Convolution: {}".format(
1131 num_placeholders
1132 )
1133 qzero = [128] * num_placeholders
1134 if num_placeholders == 2:
1135 numpy_dtype = [np.uint8, np.uint8]
1136 else:
1137 numpy_dtype = [np.uint8, np.uint8, np.int32]
1138 tflite_inference_dtype = tf.uint8
1139 elif quantized_inference_dtype == QuantType.CONV_I8_I8:
1140 assert (
1141 num_placeholders == 1
1142 ), "Unsupported number of placeholders for Convolution: {}".format(
1143 num_placeholders
1144 )
1145 qzero = [0] * num_placeholders
1146 if num_placeholders == 2:
1147 numpy_dtype = [np.int8, np.int8]
1148 else:
1149 numpy_dtype = [np.int8, np.int8, np.int32]
1150 tflite_inference_dtype = tf.int8
1151 elif quantized_inference_dtype == QuantType.CONV_I16_I8:
1152 assert (
1153 num_placeholders == 1
1154 ), "Unsupported number of placeholders for Convolution: {}".format(
1155 num_placeholders
1156 )
1157 if num_placeholders == 2:
1158 qzero = [0, 0]
1159 numpy_dtype = [np.int16, np.int8]
1160 else:
1161 qzero = [0, 0, 0]
1162 numpy_dtype = [
1163 np.int16,
1164 np.int8,
1165 np.int64,
1166 ] # np.int64 to represent 40 bits accumulator
1167 tflite_inference_dtype = tf.int16
1168 else:
1169 raise Exception(
1170 "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
1171 )
1172
1173 else:
1174 is_quantized = False
1175
1176 tf_model_filename = None
1177 tf_result_npy_filename = None
1178 tf_result_name = None
1179
1180 tflite_model_filename = None
1181 tflite_result_npy_filename = None
1182 tflite_result_name = None
1183
1184 placeholder_names = []
1185 placeholder_vals = []
1186 placeholder_signatures = ()
1187 placeholder_npy_filenames = []
1188 placeholder_shapes = []
TatWai Chong6a46b252024-01-12 13:13:22 -08001189 placeholder_dynamic = False
Jeremy Johnson015c3552022-02-23 12:15:03 +00001190
1191 for idx, (name, val) in enumerate(placeholders):
Jerry Ge54bb61e2023-12-20 22:21:24 +00001192 input_shape = tuple(val.shape)
1193
Jerry Ge28811d92023-12-05 00:53:26 +00001194 try:
1195 dynamic_shape_dim_tuples = op["dynamic_shape_dim"]
1196 dim_tuple = dynamic_shape_dim_tuples[idx]
Jerry Ge54bb61e2023-12-20 22:21:24 +00001197 input_shape = list(input_shape)
TatWai Chongbef907a2024-01-23 09:40:37 -08001198
1199 # Set the dimensions of input that are listed in the builder profile to unknown.
1200 for dim in dim_tuple:
1201 input_shape[dim] = None
1202
TatWai Chong6a46b252024-01-12 13:13:22 -08001203 # When any dimension size is unknown, mark the placeholder as dynamic type.
1204 placeholder_dynamic = True
Jerry Ge28811d92023-12-05 00:53:26 +00001205
Jerry Ge54bb61e2023-12-20 22:21:24 +00001206 addl_args.append(tuple(input_shape))
Jerry Ge28811d92023-12-05 00:53:26 +00001207 except KeyError:
1208 pass
1209
Jeremy Johnson015c3552022-02-23 12:15:03 +00001210 placeholder_names.append(name)
1211 placeholder_signatures = placeholder_signatures + (
Jerry Ge54bb61e2023-12-20 22:21:24 +00001212 tf.TensorSpec(shape=input_shape, dtype=val.dtype, name=name),
Jeremy Johnson015c3552022-02-23 12:15:03 +00001213 )
1214 placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
1215 placeholder_shapes.append(val.shape)
1216
1217 # Get test builder class
1218 fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
1219 concrete_function = tf.function(input_signature=placeholder_signatures)(
1220 fcn_node.eval
1221 ).get_concrete_function()
1222
1223 if is_quantized:
1224
1225 assert dtype is tf.float32, "quantized test must come from float32 graph"
1226
1227 # 1. Quantize the float placeholder npy values to feed the graph
1228 for idx, (name, val) in enumerate(placeholders):
1229
1230 # we use np.amin()/np.amax() to determine dynamic range
1231 # for quantized test
1232 zeropoint = 0
1233 scale = 1.0
1234 if numpy_dtype[idx] != np.int64:
1235 qmin = np.iinfo(numpy_dtype[idx]).min
1236 qmax = np.iinfo(numpy_dtype[idx]).max
1237 num_bits = np.iinfo(numpy_dtype[idx]).bits
1238 # 40 bit is represented as np.int64
1239 else:
1240 num_bits = 40
1241 qmin = -(1 << num_bits)
1242 qmax = (1 << num_bits) - 1
1243
1244 min_val = np.amin(val)
1245 max_val = np.amax(val)
1246
1247 # for single value tensor, we set scale equal to the abs(value),
1248 # and fix zeropoint to 128
1249 # if val > 0, it'll be represented as 129,
1250 # where val = (129 - 128) * val
1251 # if val < 0, it'll be represented as 127,
1252 # where val = (127 - 128) * (-val)
1253 # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
1254 # and let quantized 1 represent the value
1255 # also adjust the effective min/max accordingly
1256 if max_val == min_val:
1257 if max_val != 0:
1258 scale = abs(max_val)
1259 else:
1260 scale = 1.0
1261 min_val = float(qmin - qzero[idx]) * scale
1262 max_val = float(qmax - qzero[idx]) * scale
1263 else:
1264 scale = (max_val - min_val) / float(qmax - qmin)
Won Jeon6c93f412023-07-08 07:04:08 +00001265 if op_name == "squared_difference":
1266 zeropoint = -int(round((-min_val) / scale)) + qmin
1267 else:
1268 zeropoint = int(round((-min_val) / scale)) + qmin
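# Illustrative example (not from actual test data): for int8 (qmin = -128,
# qmax = 127) and values spanning [-1.0, 1.0], scale = 2/255 ~= 0.00784 and
# the zeropoint lands at (or very near) 0, the middle of the int8 range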
Jeremy Johnson015c3552022-02-23 12:15:03 +00001269
1270 # run through tf.fakequant first to ensure the quantization error is aligned
1271 fakequant_val = tf.quantization.fake_quant_with_min_max_args(
1272 val,
1273 min=min_val,
1274 max=max_val,
1275 num_bits=num_bits,
1276 name="gen_quant_npy",
1277 )
1278
Jerry Ged69e2832023-07-05 21:54:07 +00001279 quant_val = np.round(fakequant_val / scale) + zeropoint
Jeremy Johnson015c3552022-02-23 12:15:03 +00001280
1281 # In a few unit tests, after the May 2020 TF hash, this quantized
1282 # value for some reason exceeds the [0, 255] range
1283 saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])
1284
Jeremy Johnson015c3552022-02-23 12:15:03 +00001285 np.save(
1286 os.path.join(test_dir, placeholder_npy_filenames[idx]),
Jerry Ged69e2832023-07-05 21:54:07 +00001287 saved_val,
Jeremy Johnson015c3552022-02-23 12:15:03 +00001288 False,
1289 )
1290
1291 placeholder_vals.append(tf.convert_to_tensor(saved_val))
1292
1293 # 2. Convert the model to quantized TFLite flatbuffer
1294 module = tf.Module()
1295 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1296 [concrete_function], module
1297 )
1298 converter.optimizations = [tf.lite.Optimize.DEFAULT]
1299 converter.experimental_new_converter = True
1300
1301 # use MLIR-based post-quantizer
1302 converter.experimental_new_quantizer = True
1303
1304 flag = (
1305 tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 # noqa: E501
1306 )
1307 if tflite_inference_dtype == tf.int16:
1308 converter.target_spec.supported_ops = [flag]
1309
Won Jeone2325d12023-06-10 15:25:54 +00001310 # Generator function for integer quantization of TFLiteConverter
1311 # which generates a few hundred input samples with the same order, type, and shape as the inputs,
1312 # to calibrate/estimate the range of the floating-point inputs.
1313 # For broadcast fuzzing tests, fuzzing needs to be disabled, otherwise it causes a mismatch
Won Jeone2325d12023-06-10 15:25:54 +00001314 # between the tensor shapes of the inputs.
Jeremy Johnson015c3552022-02-23 12:15:03 +00001315 def input_stats():
1316 for i in range(0, args.num_samples):
Won Jeone2325d12023-06-10 15:25:54 +00001317 placeholders, _ = (
Won Jeon6c93f412023-07-08 07:04:08 +00001318 tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng, True)
Won Jeone2325d12023-06-10 15:25:54 +00001319 if tensor_gen_fcn.__name__ == "tgBFuzz"
1320 else tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng)
1321 )
1322 yield [s[1] for s in placeholders]
Jeremy Johnson015c3552022-02-23 12:15:03 +00001323
1324 converter.representative_dataset = input_stats
1325 converter.inference_input_type = tflite_inference_dtype
1326 converter.inference_output_type = tflite_inference_dtype
1327
1328 tflite_model = converter.convert()
1329
1330 tflite_model_filename = "model.tflite"
1331
1332 # Write out converted model to disk
1333 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1334 f.write(tflite_model)
1335
1336 else: # is_quantized is False
1337
1338 # 1. Saved out numpy array directly
1339 for idx, (name, val) in enumerate(placeholders):
1340 placeholder_vals.append(tf.convert_to_tensor(val))
Luke Hutton714aa602023-02-08 19:45:26 +00001341
1342 # Complex tensors are expected to be represented by a
1343 # single floating point tensor of shape [?, ..., ?, 2].
1344 if val.dtype == np.complex64:
1345 val_shape = val.shape + (2,)
1346 val = val.view(np.float32)
1347 val = val.reshape(val_shape)
1348
Jeremy Johnson015c3552022-02-23 12:15:03 +00001349 np.save(
1350 os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
1351 )
1352
1353 # 2.a Saved out .pb if framework includes tensorflow
1354 if "tf" not in excluded_framework_list:
1355 # Write out graph as protobuf to disk
1356 tf_model_filename = "model.pb"
1357 tf.io.write_graph(
1358 concrete_function.graph, test_dir, tf_model_filename, True
1359 )
1360
1361 # 2.b Saved out .tflite if framework includes tflite
1362 if "tflite" not in excluded_framework_list:
1363 # Convert the model to TFLite flatbuffer
1364 module = tf.Module()
Tai Lycf84bc92023-09-07 20:49:09 +00001365
1366 if op_name == "callonce" or op_name == "lstm_stateful":
1367 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1368 [concrete_function], fcn_node
1369 )
1370 else:
1371 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1372 [concrete_function], module
1373 )
Jeremy Johnson015c3552022-02-23 12:15:03 +00001374
1375 converter.experimental_new_converter = True
1376
1377 # Even if it's a non-quantized int32 test, this needs to be set to tf.float32
1378 converter.inference_input_type = tf.float32
1379 converter.inference_output_type = tf.float32
1380 tflite_model = converter.convert()
1381
1382 # Write out converted model to disk
1383 tflite_model_filename = "model.tflite"
1384 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1385 f.write(tflite_model)
1386
1387 # Get TF reference result if .pb is specified
1388 if tf_model_filename:
1389 tf_result_npy_filename = "tf_result.npy"
1390 tf_result = concrete_function(*placeholder_vals)
1391 np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)
1392
1393 tf_result_name = result_name
1394
1395 # Get TFLite inference result if .tflite is specified
1396 if tflite_model_filename:
1397 tflite_result_npy_filename = "tflite_result.npy"
1398
Luke Hutton5c844212023-01-27 14:17:52 +00001399 ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]
Jeremy Johnson015c3552022-02-23 12:15:03 +00001400
1401 if args.tflite_kernel_mode == "optimized" or (
1402 op_name in ops_with_optimized_only_kernel
1403 ):
1404 interpreter = tf.lite.Interpreter(
1405 model_path=os.path.join(test_dir, tflite_model_filename)
1406 )
1407 elif args.tflite_kernel_mode == "reference":
1408 interpreter = tf.lite.Interpreter(
1409 model_path=os.path.join(test_dir, tflite_model_filename),
1410 experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
1411 )
1412 else:
1413 assert 0, "unknown tflite interpreter mode {}".format(
1414 args.tflite_kernel_mode
1415 )
Jeremy Johnson015c3552022-02-23 12:15:03 +00001416
1417 input_details = interpreter.get_input_details()
1418 output_details = interpreter.get_output_details()
1419
Jerry Ge28811d92023-12-05 00:53:26 +00001420 # Prototype dynamic_shape testing
1421 # Need to resize the input tensors to known shapes when evaluating
1422 for idx, val in enumerate(placeholder_vals):
1423 interpreter.resize_tensor_input(
1424 input_details[idx]["index"], placeholder_shapes[idx]
1425 )
1426 interpreter.allocate_tensors()
1427
Jeremy Johnson015c3552022-02-23 12:15:03 +00001428 assert len(input_details) == len(
1429 placeholder_vals
1430 ), "number of placeholder mismatch"
1431
1432 for idx, val in enumerate(placeholder_vals):
1433 interpreter.set_tensor(input_details[idx]["index"], val.numpy())
1434
1435 interpreter.invoke()
1436 tflite_result = interpreter.get_tensor(output_details[0]["index"])
1437
1438 np.save(
1439 os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
1440 )
1441
1442 # The result tensor name changes after converting to a TFLite flatbuffer,
1443 # so overwrite it with the information taken from the TFLite model directly.
1444 # Assume a single result tensor for now
1445 tflite_result_name = output_details[0]["name"]
1446
Eric Kunze97b00272023-07-20 10:52:56 -07001447 _, test_name = os.path.split(test_dir)
1448
Jerry Ged5b15122024-03-26 20:51:48 +00001449 # For specifying the number of variable tensors if the graph has any
1450 try:
1451 num_variables = op["num_variables"]
1452 except KeyError:
1453 num_variables = 0
1454
Jeremy Johnson015c3552022-02-23 12:15:03 +00001455 # Write out test descriptor
1456 write_test_json(
1457 filename=os.path.join(test_dir, "test.json"),
1458 tf_model_filename=tf_model_filename,
1459 tf_result_npy_filename=tf_result_npy_filename,
1460 tf_result_name=tf_result_name,
1461 tflite_model_filename=tflite_model_filename,
1462 tflite_result_npy_filename=tflite_result_npy_filename,
1463 tflite_result_name=tflite_result_name,
1464 ifm_name=placeholder_names,
1465 ifm_file=placeholder_npy_filenames,
1466 ifm_shape=placeholder_shapes,
TatWai Chong6a46b252024-01-12 13:13:22 -08001467 ifm_dynamic=placeholder_dynamic,
Jeremy Johnson015c3552022-02-23 12:15:03 +00001468 framework_exclusions=excluded_framework_list,
1469 quantized=is_quantized,
Eric Kunze97b00272023-07-20 10:52:56 -07001470 test_name=test_name,
Jerry Ged5b15122024-03-26 20:51:48 +00001471 num_variables=num_variables,
Jeremy Johnson015c3552022-02-23 12:15:03 +00001472 )
1473 except Exception as e:
1474 msg = "Error running task: {}".format(e)
1475 print(msg)
1476 print(
1477 "".join(
1478 traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
1479 )
1480 )
1481 return False
1482 return True
1483
1484
1485def build_const_net(
1486 args,
1487 curr_shape,
1488 op_name,
1489 dtype,
1490 excluded_framework_list,
1491 quantized_inference_dtype,
1492 result_name,
1493 seed,
1494 rng,
1495 filter,
1496 unit_test_args,
1497):
1498
1499 if quantized_inference_dtype:
1500 quant_dtype = get_tf_dtype(quantized_inference_dtype)
1501 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
1502 else:
1503 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
1504 test_dir = os.path.join(args.output_dir, test_dir)
1505
1506 # If the operator has an additional function to generate arguments, call it
1507 # here and iterate through the argument list that it generates
1508 op = TF_OP_LIST[op_name]
1509 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1510
TatWai Chongfd629052022-07-25 04:01:58 +00001511 try:
1512 rank_lo, rank_hi = op["rank"]
1513 except KeyError:
1514 # Set testing rank to (1, 4) by default.
1515 rank_lo = 1
1516 rank_hi = 4
1517
1518 if len(curr_shape) not in range(rank_lo, rank_hi + 1):
1519 return
1520
Jeremy Johnson015c3552022-02-23 12:15:03 +00001521 addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
1522 for desc, addl_args in addl_args_tuple:
Jeremy Johnson0e6218e2022-05-05 17:08:04 +01001523 # Only filter on the full test_name, not the output directory
1524 _, test_name = os.path.split(test_dir + desc)
1525 if not filter or filter.search(test_name):
Jeremy Johnson015c3552022-02-23 12:15:03 +00001526 unit_test_args.append(
1527 [
1528 op_name,
1529 args,
1530 test_dir + desc,
1531 curr_shape,
1532 addl_args,
1533 dtype,
1534 excluded_framework_list,
1535 quantized_inference_dtype,
1536 result_name,
1537 seed,
1538 ]
1539 )
1540
1541
1542 # Python's built-in hash() is not reproducible across runs, so create our own hash for this purpose
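# The loop below folds each character into the running value with a fixed
# feedback constant (similar in spirit to a CRC), so the same op name always
# produces the same value across runs and platforms.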
1543def op_name_hash(op_name):
1544 result = 0xDEADBEEF
1545 for ch in op_name:
1546 if result & 1:
1547 result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
1548 else:
1549 result = (ord(ch) << 24) ^ (result >> 1)
1550
1551 return result
1552
1553
1554def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):
1555
1556 if not args.quiet:
1557 print(
1558 "Generating tests for {} ".format(
1559 op_name
1560 )
1561 )
1562
1563 op = TF_OP_LIST[op_name]
1564
1565 # Seed the RNG so that we get the same random tests for each operator each time
1566 # If the number of tests for a given generation function changes, the tests
1567 # for that operator may also change accordingly, but this will at least keep
1568 # down churn across operators.
1569
1570 bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
1571 np.int32
1572 ).max
1573 rng = np.random.default_rng(bounded_hash_val)
1574
1575 # this is a dictionary with 'tf' and 'tflite' as keys
1576 # and values being the data types we want to test under each framework
1577
1578 if isinstance(op["types"], dict):
1579 try:
1580 tf_dtypes = op["types"]["tf"]
1581 except KeyError:
1582 tf_dtypes = []
1583 try:
1584 tflite_dtypes = op["types"]["tflite"]
1585 except KeyError:
1586 tflite_dtypes = []
1587 elif isinstance(op["types"], list):
1588 tf_dtypes = op["types"]
1589 tflite_dtypes = op["types"]
1590
1591 tf_nonquantized_dtypes = tf_dtypes # tf doesn't support quantized data types
1592 tflite_quantized_dtypes = []
1593 tflite_nonquantized_dtypes = []
1594 for dtype in tflite_dtypes:
1595 if isinstance(dtype, QuantType):
1596 tflite_quantized_dtypes.append(dtype)
1597 else:
1598 tflite_nonquantized_dtypes.append(dtype)
1599
1600 nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
1601 set(tflite_nonquantized_dtypes)
1602 )
1603 nonquantized_dtypes = list(nonquantized_dtypes_set)
1604 quantized_dtypes = tflite_quantized_dtypes
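# e.g. for "add" this yields nonquantized dtypes {tf.float32, tf.int32} and
# quantized dtypes [ALL_U8, ALL_I8, ALL_I16]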
1605
Jerry Ge5dd5a552023-05-23 22:41:20 +00001606 # append custom_shapes or replace shape_list with custom_shapes
1607 try:
1608 custom_shapes = op["custom_shapes"]
1609 if custom_shapes["custom_shape_only"]:
1610 shape_list = custom_shapes["shape_list"]
1611 else:
Jerry Geabdac232023-06-12 16:27:16 +00001612 shape_list = shape_list.copy()
Won Jeonf9c0cee2023-09-18 16:32:45 -07001613 shape_list.extend(custom_shapes["shape_list"])
Jerry Ge5dd5a552023-05-23 22:41:20 +00001614 except KeyError:
1615 pass
1616
Jeremy Johnson015c3552022-02-23 12:15:03 +00001617 # populate non quantized unit test arguments
1618 for dtype in nonquantized_dtypes:
1619
1620 excluded_framework_set = set(ALL_FRAMEWORKS)
1621 if dtype in tf_nonquantized_dtypes:
1622 excluded_framework_set.remove("tf")
1623 if dtype in tflite_nonquantized_dtypes:
1624 excluded_framework_set.remove("tflite")
1625 excluded_framework_list = list(excluded_framework_set)
1626
1627 for curr_shape in shape_list:
1628 build_const_net(
1629 args,
1630 curr_shape,
1631 op_name,
1632 dtype,
1633 excluded_framework_list,
1634 None,
1635 result_name,
1636 bounded_hash_val,
1637 rng,
1638 filter,
1639 unit_test_args,
1640 )
1641
1642 # populate quantized unit test arguments
1643 # must exclude 'tf' and source dtype being tf.float32
1644 for dtype in quantized_dtypes:
1645 for curr_shape in shape_list:
1646 build_const_net(
1647 args,
1648 curr_shape,
1649 op_name,
1650 tf.float32,
1651 ["tf"],
1652 dtype,
1653 result_name,
1654 bounded_hash_val,
1655 rng,
1656 filter,
1657 unit_test_args,
1658 )
1659
1660 return unit_test_args
1661
1662
1663def createDynamicOpLists():
1664 """The templated operators are conv2d-style operators with a number of kernel
1665 sizes. Since the operator is unchanged, we generate the range of kernel
1666 sizes here in this loop and remove the original templates from the list.
1667
1668 This could be expanded to non-conv2d-style operators in the future."""
1669
1670 # Dynamically create op lists for convolutions with a list of kernel sizes
1671 KERNELS = [
1672 [1, 1],
1673 [3, 3],
1674 [5, 5],
1675 ]
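# e.g. "conv2d" from TEMPLATE_LIST below combines with these kernels to
# produce conv2d_1x1, conv2d_3x3 and conv2d_5x5 entries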
1676
TatWai Chongfd629052022-07-25 04:01:58 +00001677 # dim = [D, H, W]
1678 KERNELS_3D = [
1679 [1, 1, 1],
1680 [2, 3, 3],
1681 [3, 5, 5],
1682 ]
1683
Jeremy Johnson015c3552022-02-23 12:15:03 +00001684 TEMPLATE_LIST = [
1685 "conv2d",
1686 "conv2d_bias",
1687 "conv2d_relu",
1688 "conv2d_relu6",
1689 "conv2d_relu_n1_to_1",
1690 "conv2d_tanh",
1691 "depthwise_conv2d",
1692 "depthwise_conv2d_bias",
1693 "transpose_conv2d",
1694 ]
1695
TatWai Chongfd629052022-07-25 04:01:58 +00001696 TEMPLATE_LIST_CONV3D = [
1697 "conv3d",
1698 "conv3d_bias",
1699 ]
1700
Jeremy Johnson015c3552022-02-23 12:15:03 +00001701 for t in TEMPLATE_LIST:
1702 for k in KERNELS:
1703 testName = "{}_{}x{}".format(t, k[0], k[1])
1704 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1705 TF_OP_LIST[testName]["filter"] = k
1706 TF_OP_LIST[testName]["template"] = False
1707
TatWai Chongfd629052022-07-25 04:01:58 +00001708 # The existing operators don't support kernel dimensions higher than 2.
1709 for t in TEMPLATE_LIST_CONV3D:
1710 for k in KERNELS_3D:
1711 testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
1712 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1713 TF_OP_LIST[testName]["filter"] = k
1714 TF_OP_LIST[testName]["template"] = False
1715
Jeremy Johnson015c3552022-02-23 12:15:03 +00001716 # Delete any templates after having created any dynamic ops
1717 # This is a two-pass operation because it's bad practice to delete
1718 # keys from dictionaries while iterating
1719 keyList = []
1720 for k in TF_OP_LIST:
1721 try:
1722 if TF_OP_LIST[k]["template"]:
1723 keyList.append(k)
1724 continue
1725 except KeyError:
1726 pass
1727
1728 for k in keyList:
1729 del TF_OP_LIST[k]
1730
1731
1732def main():
1733 parser = argparse.ArgumentParser()
1734 parser.add_argument(
1735 "--seed", dest="random_seed", default=42, type=int, help="Random seed"
1736 )
1737 parser.add_argument(
1738 "--random-shapes",
1739 dest="random_shapes",
1740 default=0,
1741 type=int,
1742 help=(
1743 "Use N random shapes of each rank for generating tests,"
1744 "seeded with random seed"
1745 ),
1746 )
1747 parser.add_argument(
1748 "-o",
1749 "--output-dir",
1750 dest="output_dir",
1751 default=".",
1752 type=str,
1753 help="Test output directory path prefix",
1754 )
1755 parser.add_argument(
1756 "-q",
1757 "--quiet",
1758 dest="quiet",
1759 default=False,
1760 action="store_true",
1761 help="Do not print test names",
1762 )
1763 parser.add_argument(
1764 "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
1765 )
1766 parser.add_argument(
1767 "-m",
1768 "--tflite-kernel-mode",
1769 dest="tflite_kernel_mode",
1770 type=str,
1771 choices=["reference", "optimized"],
1772 default="reference",
1773 help="TFLite interpreter kernel mode",
1774 )
1775 parser.add_argument(
1776 "--num-samples",
1777 dest="num_samples",
1778 default=200,
1779 type=int,
1780 help="Number of input samples for post-training quantization",
1781 )
1782 parser.add_argument(
1783 "--filter",
1784 dest="filter",
1785 default="",
1786 type=str,
1787 help="Filter test names by this expression",
1788 )
1789 args = parser.parse_args()
1790
1791 # Turn the filter into a re object if present
1792 filter = None
1793 if args.filter != "":
1794 filter = re.compile(args.filter)
1795
1796 # Autodetect CPU count
1797 if args.jobs <= 0:
1798 args.jobs = os.cpu_count()
1799
1800 # Disable TF info messages
1801 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
1802
1803 try:
1804 os.makedirs(args.output_dir)
1805 except FileExistsError:
1806 pass
1807
1808 if args.random_shapes:
1809 gen_rand_shapes(args)
1810
1811 # Build dynamic ops
1812 createDynamicOpLists()
1813
1814 # Generate the test list and arguments to run_unit_test()
1815 unit_test_args = []
1816
1817 for op in TF_OP_LIST:
1818 generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)
1819
1820 errors = 0
1821 for t in unit_test_args:
1822 if not run_unit_test(*t):
1823 errors = errors + 1
1824
1825 if not args.quiet:
1826 print("\nAll tasks done - with {} errors".format(errors))
1827
1828 return 1 if errors else 0
1829
1830
1831if __name__ == "__main__":
1832 exit(main())