1#!/usr/bin/env python3
2# Copyright (c) 2020-2024, ARM Limited.
3# SPDX-License-Identifier: Apache-2.0
4import argparse
5import os
6import re
7import traceback
8
9import numpy as np
10
11# Level | Level for Humans | Level Description
12# -------|------------------|------------------------------------
13# 0 | DEBUG | [Default] Print all messages
14# 1 | INFO | Filter out INFO messages
15# 2 | WARNING | Filter out INFO & WARNING messages
16# 3 | ERROR | Filter out all messages
17# Filter tensorflow debug message except errors
18os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
19
20# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
21import tensorflow as tf # noqa: E402
22from frameworks.write_test_json import write_test_json # noqa: E402
23from frameworks.arg_gen import ArgGen # noqa: E402
24from frameworks.tensor_gen import TGen # noqa: E402
25from frameworks.test_builder import TBuilder # noqa: E402
26from frameworks.test_gen_utils import ( # noqa: E402
27    QuantType,
28 get_tf_dtype,
29 get_shape_str,
30) # noqa: E402
31
32from tensorflow.lite.python.interpreter import OpResolverType # noqa: E402
33
34# All of the supported frameworks
35ALL_FRAMEWORKS = ["tf", "tflite"]
36
37# Lists of different data types
38TYPE_F = [tf.float32]
39TYPE_I = [tf.int32]
40TYPE_FI = [tf.float32, tf.int32]
41TYPE_B = [tf.bool]
42TYPE_FIB = [tf.float32, tf.int32, tf.bool]
43TYPE_H = [tf.float16]
44TYPE_FH = [tf.float32, tf.float16]
45TYPE_FHI = [tf.float32, tf.float16, tf.int32]
46TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]
47
48# The list of operator tests
49# Each dictionary entry for an op is a dictionary with the following required members:
50# 'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
51# 'build_fcn': tuple (Test builder function, Tensor generator function,
52# Argument generator function)
53# 'types': list of Tensorflow types that should be tested for this op
54# OR
55# a dictionary of {'framework_name': [type_list] } for cases where only
56# a subset of the types should be tested in each framework. This can also
57# be used to restrict an operator to a particular framework.
58#
59# And optional members:
60# 'template': boolean (indicates that this is a templated op which gets further
61# processing in createDynamicOpLists)
62# 'bias': boolean indicating that there is a bias component to be generated
63# 'qtypes': List of QuantType quantized types to generate for this op
64# 'rank': tuple (lowest rank, highest rank). Dimension range of input tensor.
65# 'custom_shapes': List of custom shapes for specific operators
66
67TF_OP_LIST = {
68 "add": {
69 "operands": (2, 0),
70 "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
71 "types": {
72 "tf": TYPE_FI,
73 "tflite": list(
74 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
75 ),
76 },
77 },
78 "sub": {
79 "operands": (2, 0),
80 "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
81 "types": {
82 "tf": TYPE_FI,
83 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
84            # QuantType.ALL_I16 fails in TFLite conversion
85 },
86 },
87 "mul": {
88 "operands": (2, 0),
89 "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
90 "types": {
91 "tf": TYPE_FI,
92 "tflite": list(
93 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
94 ),
95 },
96 },
97 "exp": {
98 "operands": (1, 0),
99 "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
100 "types": TYPE_F,
101 },
102 "rcp": {
103 "operands": (1, 0),
104 "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
105 "types": TYPE_F,
106 },
107 "relu": {
108 "operands": (1, 0),
109 "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
110 "types": {
111 "tf": TYPE_F,
112 "tflite": list(
113 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
114 ),
115 },
116 },
117    "relu1": {
118 "operands": (1, 0),
119 "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
120 "types": {
121 "tf": [],
122 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
123 },
124 },
125    "relu0To1": {
126 "operands": (1, 0),
127 "build_fcn": (TBuilder.Relu0To1, TGen.tgBasic, ArgGen.agNone),
128 "types": {
129 "tf": [],
130 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
131 },
132 },
133    "relu6": {
134 "operands": (1, 0),
135 "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
136 "types": {
137 "tf": TYPE_F,
138 "tflite": list(
139 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
140 ),
141 },
142 },
143 "leaky_relu": {
144 "operands": (1, 0),
145 "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
146 "types": {
147 "tf": TYPE_F,
148 "tflite": list(
149 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
150 ),
151 },
152 },
153    "prelu": {
154 "operands": (1, 0),
155 "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
156 "types": {
157 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
158 },
159 },
160    "gelu": {
161 "operands": (1, 0),
162 "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
163 "types": {
164 # Need compiler support for tf.Erf.
165 # "tf": TYPE_F,
166 "tflite": list(
167 # Only float32, int8 and uint8 supported currently
168 TYPE_F
169 + [QuantType.ALL_U8, QuantType.ALL_I8]
170 ),
171 },
172 },
173    "concat": {
174 "operands": (2, 0),
175 "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
176 "types": TYPE_FI,
177        "rank": (0, 4),
178 "custom_shapes": {
179 "custom_shape_only": False,
180 "shape_list": [()],
181 },
182    },
183 "bitwise_and": {
184 "operands": (2, 0),
185 "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
186 "types": {"tf": TYPE_I}, # Not supported in TF Lite
187 },
188 "bitwise_or": {
189 "operands": (2, 0),
190 "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
191 "types": {"tf": TYPE_I}, # Not supported in TF Lite
192 },
193 "bitwise_not": {
194 "operands": (1, 0),
195 "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
196 "types": {"tf": TYPE_I}, # Not supported in TF Lite
197 },
198 "bitwise_xor": {
199 "operands": (2, 0),
200 "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
201 "types": {"tf": TYPE_I}, # Not supported in TF Lite
202 },
203 "logical_and": {
204 "operands": (2, 0),
205 "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
206 "types": TYPE_B,
207 },
208 "logical_or": {
209 "operands": (2, 0),
210 "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
211 "types": TYPE_B,
212 },
213 "logical_not": {
214 "operands": (1, 0),
215 "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
216 "types": TYPE_B,
217 },
218 "reduce_any": {
219 "operands": (1, 0),
220 "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
221 "types": TYPE_B,
222 },
223 "reduce_all": {
224 "operands": (1, 0),
225 "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
226        "types": TYPE_B,
227    },
228 "reduce_min": {
229 "operands": (1, 0),
230 "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
231 "types": {
232 "tf": TYPE_FI,
233 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
234 },
235 },
236 "reduce_max": {
237 "operands": (1, 0),
238 "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
239 "types": {
240 "tf": TYPE_FI,
241 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
242 },
243 },
244 "reduce_sum": {
245 "operands": (1, 0),
246 "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
247 "types": {
248 "tf": TYPE_F,
249 # v2 converter doesn't recognize quantized reduce_sum
250 # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
251 "tflite": TYPE_F,
252 },
253 },
254 "reduce_mean": {
255 "operands": (1, 0),
256 "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
257 "types": {
258 "tf": TYPE_F,
259 "tflite": list(
260 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
261 ),
262 },
263 },
264 "reduce_product": {
265 "operands": (1, 0),
266 "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
267 "types": TYPE_F,
268 },
269 "min": {
270 "operands": (2, 0),
271 "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
272 "types": TYPE_FI,
273 },
274 "max": {
275 "operands": (2, 0),
276 "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
277 "types": TYPE_FI,
278 },
279 "pow": {
280 "operands": (2, 0),
281 "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
282 # Technically, integer is supported, but only for positive exponents.
283 # Needs a random argument generator.
284 "types": TYPE_F,
285 },
286 "abs": {
287 "operands": (1, 0),
288 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
289 "types": TYPE_F,
290 },
291 "ceil": {
292 "operands": (1, 0),
293 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
294 "types": TYPE_F,
295 },
296 "floor": {
297 "operands": (1, 0),
298 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
299 "types": TYPE_F,
300 },
301 "log": {
302 "operands": (1, 0),
303 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
304 "types": TYPE_F,
305 },
306 "negate": {
307 "operands": (1, 0),
308 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
309 "types": TYPE_F,
310 },
311 "rsqrt": {
312 "operands": (1, 0),
313        "build_fcn": (TBuilder.Rsqrt, TGen.tgBasicPositive, ArgGen.agNone),
314 "types": {
315 "tf": TYPE_F,
316 "tflite": list(TYPE_F + [QuantType.ALL_I8]),
317 },
318    },
319    "sign": {
320 "operands": (1, 0),
321 "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
322 "types": {
323 "tf": TYPE_F,
324 },
325 },
326    "sigmoid": {
327 "operands": (1, 0),
328 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
329 "types": {
330 "tf": TYPE_F,
331 "tflite": list(
332 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
333 ),
334 },
335 },
336 "tanh": {
337 "operands": (1, 0),
338 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
339 "types": {
340 "tf": TYPE_F,
341 "tflite": list(
342 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
343 ),
344 },
345 },
346    "erf": {
347 "operands": (1, 0),
348 "build_fcn": (TBuilder.Erf, TGen.tgBasic, ArgGen.agNone),
349 "types": {
350 "tf": TYPE_F,
351 },
352 },
353    "sin": {
354 "operands": (1, 0),
355 "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
356        "types": TYPE_F,
357    },
358 "cos": {
359 "operands": (1, 0),
360 "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
361        "types": TYPE_F,
362    },
363    "atan2": {
364 "operands": (2, 0),
365 "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
366 "types": {
367 "tflite": TYPE_F,
368 },
369 },
370    "square": {
371 "operands": (1, 0),
372 "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
373 "types": TYPE_F,
374 },
375 "squared_difference": {
376 "operands": (2, 0),
377 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
378        "types": {
379 "tf": TYPE_F,
380 "tflite": list(TYPE_FI + [QuantType.ALL_I8]),
381 },
382    },
383 "equal": {
384 "operands": (2, 0),
385 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
386 "types": TYPE_FI,
387 },
388 "greater_equal": {
389 "operands": (2, 0),
390 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
391 "types": TYPE_FI,
392 },
393 "greater": {
394 "operands": (2, 0),
395 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
396 "types": TYPE_FI,
397 },
398 "less": {
399 "operands": (2, 0),
400 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
401 "types": TYPE_FI,
402 },
403 "less_equal": {
404 "operands": (2, 0),
405 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
406 "types": TYPE_FI,
407 },
408 "conv2d_TEMPLATE": {
409 "operands": (1, 1),
410 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
411 "types": {
412 "tf": [tf.float32],
413 "tflite": [
414 tf.float32,
415 QuantType.CONV_U8_U8,
416 QuantType.CONV_I8_I8,
417 QuantType.CONV_I16_I8,
418 ],
419 },
420 "template": True,
421 },
422 "conv2d_relu_TEMPLATE": {
423 "operands": (1, 2),
424 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
425 "types": {
426 "tf": [tf.float32],
427 "tflite": [
428 tf.float32,
429 QuantType.CONV_U8_U8,
430 QuantType.CONV_I8_I8,
431 QuantType.CONV_I16_I8,
432 ],
433 },
434 "template": True,
435 },
436 "conv2d_relu6_TEMPLATE": {
437 "operands": (1, 2),
438 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
439 "types": {
440 "tf": [tf.float32],
441 "tflite": [
442 tf.float32,
443 QuantType.CONV_U8_U8,
444 QuantType.CONV_I8_I8,
445 QuantType.CONV_I16_I8,
446 ],
447 },
448 "template": True,
449 },
450 "conv2d_relu_n1_to_1_TEMPLATE": {
451 "operands": (1, 2),
452 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
453 "types": {
454 "tf": [tf.float32],
455 "tflite": [
456 tf.float32,
457 QuantType.CONV_U8_U8,
458 QuantType.CONV_I8_I8,
459 QuantType.CONV_I16_I8,
460 ],
461 },
462 "template": True,
463 },
464 # This test is converted as:
465 # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
466    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
467 "conv2d_tanh_TEMPLATE": {
468 "operands": (1, 2),
469 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
470 "types": {
471 "tf": [tf.float32],
472 "tflite": [
473 tf.float32,
474 QuantType.CONV_U8_U8,
475 QuantType.CONV_I8_I8,
476 QuantType.CONV_I16_I8,
477 ],
478 },
479 "template": True,
480 },
481 "conv2d_bias_TEMPLATE": {
482 "operands": (1, 2),
483 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
484 "types": {
485 "tf": [tf.float32],
486 "tflite": [
487 tf.float32,
488 QuantType.CONV_U8_U8,
489 QuantType.CONV_I8_I8,
490 QuantType.CONV_I16_I8,
491 ],
492 },
493 "bias": True,
494 "template": True,
495 },
496    "conv3d_TEMPLATE": {
497 "operands": (1, 1),
498 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
499 "types": {
500 "tf": [tf.float32],
501 "tflite": [
502 tf.float32,
503 QuantType.CONV_U8_U8,
504 QuantType.CONV_I8_I8,
505 # Quantization to 16x8-bit not yet supported by tflite.
506 ],
507 },
508 "template": True,
509 "rank": (1, 5),
510 },
511 "conv3d_bias_TEMPLATE": {
512 "operands": (1, 2),
513 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
514 "types": {
515 "tf": [tf.float32],
516 "tflite": [
517 tf.float32,
518 QuantType.CONV_U8_U8,
519 QuantType.CONV_I8_I8,
520 # Quantization to 16x8-bit not yet supported by tflite.
521 ],
522 },
523 "bias": True,
524 "template": True,
525 "rank": (1, 5),
526 },
527    "depthwise_conv2d_TEMPLATE": {
528 "operands": (1, 1),
529 "build_fcn": (
530 TBuilder.DepthwiseConv2d,
531 TGen.tgDepthwiseConv2d,
532 ArgGen.agDepthwiseConv2d,
533 ),
534 "types": {
535 "tf": [tf.float32],
536 "tflite": [
537 tf.float32,
538 QuantType.CONV_U8_U8,
539 QuantType.CONV_I8_I8,
540 QuantType.CONV_I16_I8,
541 ],
542 },
543 "template": True,
544 },
545 "depthwise_conv2d_bias_TEMPLATE": {
546 "operands": (1, 2),
547 "build_fcn": (
548 TBuilder.DepthwiseConv2dWithBias,
549 TGen.tgDepthwiseConv2d,
550 ArgGen.agDepthwiseConv2d,
551 ),
552 "types": {
553 "tf": [tf.float32],
554 "tflite": [
555 tf.float32,
556 QuantType.CONV_U8_U8,
557 QuantType.CONV_I8_I8,
558 QuantType.CONV_I16_I8,
559 ],
560 },
561 "bias": True,
562 "template": True,
563 },
564 "transpose_conv2d_TEMPLATE": {
565 "operands": (1, 1),
566 "build_fcn": (
567 TBuilder.TransposeConv2d,
568 TGen.tgTransposeConv2d,
569 ArgGen.agTransposeConv2d,
570 ),
571 "types": {
572 "tf": [tf.float32],
573 "tflite": [
574 tf.float32,
575 QuantType.CONV_U8_U8,
576 QuantType.CONV_I8_I8,
577 QuantType.CONV_I16_I8,
578 ],
579 },
580 "template": True,
581 },
582 "argmax": {
583 "operands": (1, 0),
584 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
585 "types": {"tf": TYPE_F},
586 },
587 "avg_pool2d": {
588 "operands": (1, 0),
589 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
590 "types": {
591 "tf": TYPE_F,
592 "tflite": list(
593 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
594 ),
595 },
596 },
597 "max_pool2d": {
598 "operands": (1, 0),
599 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
600 "types": {
601 "tf": TYPE_F,
602 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
603 # ALL_I16 not supported yet
604 # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
605 # QI16 is missing from MaxPoolOperandAndResultConstraints
606 # If adding QI16 back this test can run through.
607 },
608 },
609 "reshape": {
610 "operands": (1, 0),
611 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
612 "types": TYPE_FI,
613 },
614 "transpose": {
615 "operands": (1, 0),
616 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
617 "types": TYPE_FI,
618 },
619 "slice": {
620 "operands": (1, 0),
621 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
622 "types": TYPE_FI,
623 },
624 "strided_slice": {
625 "operands": (1, 0),
626 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
627 "types": TYPE_FI,
628 },
629 "select": {
630 "operands": (3, 0),
631 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
632 "types": TYPE_FI,
633 },
634 "addn": {
635 "operands": (4, 0),
636 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
637 "types": TYPE_FI,
638 },
639 "concatv2": {
640 "operands": (4, 0),
641 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
642 "types": TYPE_FI,
643        "rank": (0, 4),
644 "custom_shapes": {
645 "custom_shape_only": False,
646 "shape_list": [()],
647 },
648    },
649 "stack": {
650 "operands": (4, 0),
651 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
652 "types": TYPE_FI,
653 },
654 "unstack": {
655 "operands": (1, 0),
656 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
657 "types": TYPE_F,
658 },
659    "mirrorpad": {
660 "operands": (1, 0),
661 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
662 "types": TYPE_FI,
663 },
664    "pad": {
665 "operands": (1, 0),
666 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
667        "types": {
668 "tf": TYPE_F,
669 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
670 },
671    },
672 "expand_dims": {
673 "operands": (1, 0),
674 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
675 "types": TYPE_FI,
676 },
677 "shape": {
678 "operands": (1, 0),
679 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
680 "types": TYPE_FI,
681 },
682 "rank": {
683 "operands": (1, 0),
684 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
685 "types": TYPE_FI,
686 },
687 "fill": {
688 "operands": (1, 0),
689 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
690 "types": TYPE_FI,
691 },
692 "elu": {
693 "operands": (1, 0),
694 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
695 "types": TYPE_F,
696 },
697 "softmax": {
698 "operands": (1, 0),
699 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
700 "types": {
701 "tf": TYPE_F,
702 "tflite": list(
703 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
704 ),
705 },
706 },
707 "log_softmax": {
708 "operands": (1, 0),
709 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
710 "types": TYPE_F,
711 },
712    "dynamic_linear": {
713 "operands": (1, 0),
714 "build_fcn": (TBuilder.DynamicLinear, TGen.tgBasic, ArgGen.agNone),
715 "types": {
716 "tf": [],
717 "tflite": list(TYPE_F),
718 },
719 "custom_shapes": {
720 "custom_shape_only": True,
721 "shape_list": [(14, 19)],
722 },
723        # List of tuples (one per input) specifying which dim to set to None
724 # In this case, we have 1 input. So we have 1 tuple
725 # We're setting the first input's first dim to None
726 "dynamic_shape_dim": [
727 (0,),
728 ],
729 },
730    "matmul": {
731 "operands": (2, 0),
732 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
733 "types": {
734 "tf": TYPE_F,
735 "tflite": list(
736 TYPE_F
737 + [QuantType.ALL_U8, QuantType.ALL_I8]
738                # 16-bit matmul fails to convert
739 ),
740 },
741 },
742 "add_scalar": {
743 "operands": (1, 0),
744 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
745 "types": TYPE_F,
746 },
747 "add_1d": {
748 "operands": (2, 0),
749 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
750 "types": TYPE_F,
751 },
752 "split": {
753 "operands": (1, 0),
754 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
755 "types": TYPE_FI,
756 },
757 "tile": {
758 "operands": (1, 0),
759 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
760 "types": TYPE_FI,
761 },
762 "reverse": {
763 "operands": (1, 0),
764 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
765 "types": {"tf": TYPE_FI},
766 },
767 "gather": {
768 "operands": (1, 0),
769 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
770 "types": TYPE_FI,
771 },
772 "gather_nd": {
773 "operands": (1, 0),
774 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
775 "types": TYPE_FI,
776 },
777 "scatter_nd": {
778 "operands": (1, 0),
779 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
780 "types": TYPE_FI,
781 },
782 "space_to_batch": {
783 "operands": (1, 0),
784 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
785 "types": TYPE_F,
786 },
787 "batch_to_space": {
788 "operands": (1, 0),
789 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
790 "types": TYPE_F,
791 },
792    "dynamic_batch_to_space": {
793 "operands": (1, 0),
794 "build_fcn": (
795 TBuilder.DynamicBatchToSpace,
796 TGen.tgBasic,
797 ArgGen.agBatchToSpace,
798 ),
799 "types": TYPE_F,
800 "custom_shapes": {
801 "custom_shape_only": True,
802 "shape_list": [(8, 4, 4, 4)],
803 },
804        # List of tuples (one per input) specifying which dim to set to None
805 # In this case, we have 1 input. So we have 1 tuple
806 # We're setting the first input's 0th dim to None
807 "dynamic_shape_dim": [
808 (0,),
809 ],
810 },
811    "space_to_depth": {
812 "operands": (1, 0),
813 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
814 "types": TYPE_F,
815 },
816    "dynamic_space_to_depth": {
817 "operands": (1, 0),
818 "build_fcn": (TBuilder.DynamicSpaceToDepth, TGen.tgBasic, ArgGen.agNone),
819 "types": {
820 "tf": [],
821 "tflite": list(TYPE_F),
822 },
823 "custom_shapes": {
824 "custom_shape_only": True,
825 "shape_list": [(1, 32, 32, 8)],
826 },
827        # List of tuples (one per input) specifying which dim to set to None
828        # In this case, we have 1 input. So we have 1 tuple
829        # We're setting the first input's first (batch) dim to None
830        "dynamic_shape_dim": [
831            (0,),
832        ],
833 },
834    "depth_to_space": {
835 "operands": (1, 0),
836 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
837 "types": TYPE_F,
838 },
839    "dynamic_depth_to_space": {
840 "operands": (1, 0),
841 "build_fcn": (TBuilder.DynamicDepthToSpace, TGen.tgBasic, ArgGen.agNone),
842 "types": {
843 "tf": [],
844 "tflite": list(TYPE_F),
845 },
846 "custom_shapes": {
847 "custom_shape_only": True,
848 "shape_list": [(1, 1, 1, 4)],
849 },
850        # List of tuples (one per input) specifying which dim to set to None
851        # In this case, we have 1 input. So we have 1 tuple
852        # We're setting the first input's first (batch) dim to None
853        "dynamic_shape_dim": [
854            (0,),
855        ],
856 },
857    "one_hot": {
858 "operands": (3, 1),
859 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
860 "types": TYPE_FI,
861 },
862 "fakequant": {
863 "operands": (1, 0),
864 "build_fcn": (
865 TBuilder.Fakequant,
866 TGen.tgBasic,
867 ArgGen.agFakequant,
868 ),
869 "types": {"tf": TYPE_F},
870 },
871    "resize": {
872        "operands": (1, 0),
873        "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
874        "types": {
875 "tf": TYPE_F,
876 "tflite": list(
877 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
878 ),
879 },
880        "custom_shapes": {
881 "custom_shape_only": False,
882 "shape_list": [(3, 1, 1, 7)],
883 },
884    },
885    "left_shift": {
886 "operands": (1, 0),
887 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
888 "types": {"tf": [tf.int32]},
889 },
890 "right_shift": {
891 "operands": (1, 0),
892 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
893 "types": {
894 "tf": [
895 tf.int32,
896 ]
897 },
898 },
899    "while": {
900 "operands": (1, 0),
901 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
902 "types": {
903 "tflite": list(TYPE_F),
904 },
905 },
906 "lstm": {
907 "operands": (1, 0),
908 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
909 "types": {
910 "tflite": [
911 tf.float32,
912 # tf.int32
913 ]
914 },
915 },
916    "lstm_stateful": {
917 "operands": (1, 0),
918 "build_fcn": (TBuilder.SLSTM, TGen.tgRecurrent, ArgGen.agNone),
919 "types": {
920 "tflite": [
921 tf.float32,
922 ]
923 },
924 },
925    "gru": {
926 "operands": (1, 0),
927 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
928 "types": {
929 "tflite": [
930 tf.float32,
931 # tf.int32
932 ]
933 },
934 },
935 "rnn": {
936 "operands": (1, 0),
937 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
938 "types": {
939 "tflite": [
940 tf.float32,
941 ]
942 },
943 },
944    "callonce": {
945 "operands": (1, 0),
946 "build_fcn": (TBuilder.CallOnce, TGen.tgBasic, ArgGen.agNone),
947 "types": {
948 "tflite": [tf.float32],
949 },
950 "custom_shapes": {
951 "custom_shape_only": True,
952 "shape_list": [(1,)],
953 },
954 },
955    "rfft2d": {
956 "operands": (1, 0),
957 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
958 "types": {
959 "tflite": TYPE_F,
960 },
961 },
962    "real": {
963 "operands": (1, 0),
964 "build_fcn": (TBuilder.Real, TGen.tgComplexComponents, ArgGen.agNone),
965 "types": {
966 "tflite": [tf.complex64],
967 },
968 },
969 "imag": {
970 "operands": (1, 0),
971 "build_fcn": (TBuilder.Imag, TGen.tgComplexComponents, ArgGen.agNone),
972 "types": {
973 "tflite": [tf.complex64],
974 },
975 },
976    "broadcastto": {
977 "operands": (1, 1),
978 "build_fcn": (TBuilder.BroadcastTo, TGen.tgBroadcastTo, ArgGen.agNone),
979 "types": {
980 "tf": TYPE_FIB,
981 },
982 },
983}
984
985# Shapes to be tested; default can be overwritten
986shape_list = [
987 (1,),
988 (64,),
989 (14, 19),
990 (13, 21, 3),
991    (1, 8, 16),
992    (1, 4, 4, 4),
993 (1, 8, 4, 17),
994 (1, 4, 8, 19),
995 (1, 32, 32, 8),
996 (1, 7, 7, 9),
997    (3, 1, 1, 7),
998    (2, 2, 7, 7, 2),
999 (1, 4, 8, 21, 17),
1000 (3, 32, 16, 16, 5),
1001]
1002
1003
1004def gen_rand_shapes(args):
1005 """Overwrite the global shape list with a new list of random shapes"""
1006 global shape_list
1007
1008 rng = np.random.default_rng(args.random_seed)
1009
1010 # Don't let things get too big... cap the maximum volume, but let
1011 # an individual dimension be 1..47
1012 max_total_volume = 32 * 32 * 4
1013
1014 shape_list = []
1015    # Only iterate over ranks 2, 3, 4, and 5
1016 for rank in range(2, 6):
1017        for n in range(args.random_shapes):
1018 new_shape = rng.integers(1, 48, size=rank)
1019
1020            # Set the batch dimension on 4D or 5D objects to 1
1021 if rank == 4 or rank == 5:
1022                new_shape[0] = 1
1023
1024 # Limit the total shape volume and throw out any
1025 # shapes that wouldn't leave at least size=2 in some non-batch dimension
1026 volume = 1
1027 skip_shape = False
1028 for i in range(rank):
1029
1030 volume *= new_shape[i]
1031
1032 # Reduce the shape, while it's larger than the maximum volume
1033 while volume > max_total_volume:
1034 new_shape[i] = new_shape[i] // 2
1035 volume = volume // 2
1036
1037 # Now an untenable dimension size? Skip this one.
1038 if new_shape[i] < 1:
1039 skip_shape = True
1040
1041 if not skip_shape:
1042 shape_list.append(tuple(new_shape))
1043
1044
1045# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
1046# or convert it to .tflite if it's a quantized unit test
1047def run_unit_test(
1048 op_name,
1049 args,
1050 test_dir,
1051 curr_shape,
1052 addl_args,
1053 dtype,
1054 excluded_framework_list,
1055 quantized_inference_dtype,
1056 result_name,
1057 seed,
1058):
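    """Build a single unit test for one op/shape/dtype combination.

    Generates input tensors, traces the builder graph into a concrete function,
    converts it to .pb and/or .tflite, runs the reference inference and writes
    the test.json descriptor. Returns True on success, False on error.
    """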
1059
1060 try:
1061 op = TF_OP_LIST[op_name]
1062 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1063
1064 # Get and seed a random number generator for this test
1065 rng = np.random.default_rng(seed)
1066
1067 # return placeholders=(str: name, np.array: value)
1068 # consts=(str: name, np.array: value)
1069        placeholders, consts = (
1070            tensor_gen_fcn(op, curr_shape, dtype, rng, False)
1071            if tensor_gen_fcn.__name__ == "tgBFuzz"
1072 else tensor_gen_fcn(op, curr_shape, dtype, rng)
1073 )
1074
1075        # If the test doesn't have any placeholders/consts, terminate early
1076 if len(placeholders) == 0 and len(consts) == 0:
1077 return True
1078
1079 if not args.quiet:
1080 print(" {} ".format(test_dir))
1081
1082 try:
1083 os.mkdir(test_dir)
1084 except FileExistsError:
1085 pass
1086
1087 const_nodes = [value for name, value in consts]
1088
1089 num_placeholders = len(placeholders)
1090 # if test is quantized, create tensor quantization metadata info for
1091 # each input tensor, based on different quantized type
1092 if quantized_inference_dtype:
1093 is_quantized = True
1094 # TODO: support INT8 IFM x INT4 weight later
1095 if quantized_inference_dtype == QuantType.ALL_U8:
1096 qzero = [128] * num_placeholders
1097 numpy_dtype = [np.uint8] * num_placeholders
1098 tflite_inference_dtype = tf.uint8
1099 elif quantized_inference_dtype == QuantType.ALL_I8:
1100 qzero = [0] * num_placeholders
1101 numpy_dtype = [np.int8] * num_placeholders
1102 tflite_inference_dtype = tf.int8
1103 elif quantized_inference_dtype == QuantType.ALL_I16:
1104 qzero = [0] * num_placeholders
1105 numpy_dtype = [np.int16] * num_placeholders
1106 tflite_inference_dtype = tf.int16
1107 elif quantized_inference_dtype == QuantType.CONV_U8_U8:
1108 assert (
1109 num_placeholders == 1
1110 ), "Unsupported number of placeholders for Convolution: {}".format(
1111 num_placeholders
1112 )
1113 qzero = [128] * num_placeholders
1114 if num_placeholders == 2:
1115 numpy_dtype = [np.uint8, np.uint8]
1116 else:
1117 numpy_dtype = [np.uint8, np.uint8, np.int32]
1118 tflite_inference_dtype = tf.uint8
1119 elif quantized_inference_dtype == QuantType.CONV_I8_I8:
1120 assert (
1121 num_placeholders == 1
1122 ), "Unsupported number of placeholders for Convolution: {}".format(
1123 num_placeholders
1124 )
1125 qzero = [0] * num_placeholders
1126 if num_placeholders == 2:
1127 numpy_dtype = [np.int8, np.int8]
1128 else:
1129 numpy_dtype = [np.int8, np.int8, np.int32]
1130 tflite_inference_dtype = tf.int8
1131 elif quantized_inference_dtype == QuantType.CONV_I16_I8:
1132 assert (
1133 num_placeholders == 1
1134 ), "Unsupported number of placeholders for Convolution: {}".format(
1135 num_placeholders
1136 )
1137 if num_placeholders == 2:
1138 qzero = [0, 0]
1139 numpy_dtype = [np.int16, np.int8]
1140 else:
1141 qzero = [0, 0, 0]
1142 numpy_dtype = [
1143 np.int16,
1144 np.int8,
1145 np.int64,
1146 ] # np.int64 to represent 40 bits accumulator
1147 tflite_inference_dtype = tf.int16
1148 else:
1149 raise Exception(
1150 "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
1151 )
1152
1153 else:
1154 is_quantized = False
1155
1156 tf_model_filename = None
1157 tf_result_npy_filename = None
1158 tf_result_name = None
1159
1160 tflite_model_filename = None
1161 tflite_result_npy_filename = None
1162 tflite_result_name = None
1163
1164 placeholder_names = []
1165 placeholder_vals = []
1166 placeholder_signatures = ()
1167 placeholder_npy_filenames = []
1168 placeholder_shapes = []
1169        placeholder_dynamic = False
1170
1171 for idx, (name, val) in enumerate(placeholders):
1172            input_shape = tuple(val.shape)
1173
1174            try:
1175 dynamic_shape_dim_tuples = op["dynamic_shape_dim"]
1176 dim_tuple = dynamic_shape_dim_tuples[idx]
1177 dim = dim_tuple[0]
1178                input_shape = list(input_shape)
1179                input_shape[dim] = None
1180                # When any dimension size is unknown, mark the placeholder as dynamic type.
1181 placeholder_dynamic = True
1182
1183                addl_args.append(tuple(input_shape))
1184            except KeyError:
1185 pass
1186
1187            placeholder_names.append(name)
1188 placeholder_signatures = placeholder_signatures + (
1189                tf.TensorSpec(shape=input_shape, dtype=val.dtype, name=name),
1190            )
1191 placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
1192 placeholder_shapes.append(val.shape)
1193
1194        # Instantiate the test builder and trace its eval() into a concrete function
1195 fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
1196 concrete_function = tf.function(input_signature=placeholder_signatures)(
1197 fcn_node.eval
1198 ).get_concrete_function()
1199
1200 if is_quantized:
1201
1202 assert dtype is tf.float32, "quantized test must come from float32 graph"
1203
1204 # 1. Quantize float placeholder npy to quantized to feed the graph
1205 for idx, (name, val) in enumerate(placeholders):
1206
1207 # we use np.amin()/np.amax() to determine dynamic range
1208 # for quantized test
1209 zeropoint = 0
1210 scale = 1.0
1211 if numpy_dtype[idx] != np.int64:
1212 qmin = np.iinfo(numpy_dtype[idx]).min
1213 qmax = np.iinfo(numpy_dtype[idx]).max
1214 num_bits = np.iinfo(numpy_dtype[idx]).bits
1215 # 40 bit is represented as np.int64
1216 else:
1217 num_bits = 40
1218 qmin = -(1 << num_bits)
1219 qmax = (1 << num_bits) - 1
1220
1221 min_val = np.amin(val)
1222 max_val = np.amax(val)
1223
1224 # for single value tensor, we set scale equal to the abs(value),
1225 # and fix zeropoint to 128
1226 # if val > 0, it'll be represented as 129,
1227 # where val = (129 - 128) * val
1228 # if val < 0, it'll be represented as 127,
1229 # where val = (127 - 128) * (-val)
1230                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
1231 # and let quantized 1 represent the value
1232 # also adjust effective min/max consequently
1233 if max_val == min_val:
1234 if max_val != 0:
1235 scale = abs(max_val)
1236 else:
1237 scale = 1.0
1238 min_val = float(qmin - qzero[idx]) * scale
1239 max_val = float(qmax - qzero[idx]) * scale
1240 else:
1241 scale = (max_val - min_val) / float(qmax - qmin)
1242                    if op_name == "squared_difference":
1243 zeropoint = -int(round((-min_val) / scale)) + qmin
1244 else:
1245 zeropoint = int(round((-min_val) / scale)) + qmin
1246
1247                # Run through tf.fakequant first to ensure the quantization error is aligned
1248 fakequant_val = tf.quantization.fake_quant_with_min_max_args(
1249 val,
1250 min=min_val,
1251 max=max_val,
1252 num_bits=num_bits,
1253 name="gen_quant_npy",
1254 )
1255
1256                quant_val = np.round(fakequant_val / scale) + zeropoint
1257
1258                # A few unit tests after the May/2020 TF hash produce quantized
1259                # values that exceed the [0, 255] range for some reason, so clip them
1260 saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])
1261
1262                np.save(
1263 os.path.join(test_dir, placeholder_npy_filenames[idx]),
1264                    saved_val,
1265                    False,
1266 )
1267
1268 placeholder_vals.append(tf.convert_to_tensor(saved_val))
1269
1270 # 2. Convert the model to quantized TFLite flatbuffer
1271 module = tf.Module()
1272 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1273 [concrete_function], module
1274 )
1275 converter.optimizations = [tf.lite.Optimize.DEFAULT]
1276 converter.experimental_new_converter = True
1277
1278 # use MLIR-based post-quantizer
1279 converter.experimental_new_quantizer = True
1280
1281 flag = (
1282 tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 # noqa: E501
1283 )
1284 if tflite_inference_dtype == tf.int16:
1285 converter.target_spec.supported_ops = [flag]
1286
1287            # Generator function for integer quantization of TFLiteConverter
1288 # which generates a few hundred input samples with the same order, type, and shape as the inputs,
1289 # to calibrate/estimate the range of the floating-point inputs.
1290 # For broadcast fuzzing tests, fuzzing needs to be disabled, otherwise, it causes a mismatch of
1291 # tensor shapes of inputs.
1292            def input_stats():
1293 for i in range(0, args.num_samples):
1294                    placeholders, _ = (
1295                        tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng, True)
1296                        if tensor_gen_fcn.__name__ == "tgBFuzz"
1297 else tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng)
1298 )
1299 yield [s[1] for s in placeholders]
1300
1301 converter.representative_dataset = input_stats
1302 converter.inference_input_type = tflite_inference_dtype
1303 converter.inference_output_type = tflite_inference_dtype
1304
1305 tflite_model = converter.convert()
1306
1307 tflite_model_filename = "model.tflite"
1308
1309 # Write out converted model to disk
1310 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1311 f.write(tflite_model)
1312
1313 else: # is_quantized is False
1314
1315            # 1. Save out the numpy array directly
1316 for idx, (name, val) in enumerate(placeholders):
1317 placeholder_vals.append(tf.convert_to_tensor(val))
1318
1319                # Complex tensors are expected to be represented by a
1320 # single floating point tensor of shape [?, ..., ?, 2].
1321 if val.dtype == np.complex64:
1322 val_shape = val.shape + (2,)
1323 val = val.view(np.float32)
1324 val = val.reshape(val_shape)
1325
1326                np.save(
1327 os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
1328 )
1329
1330            # 2.a Save out .pb if framework includes tensorflow
1331 if "tf" not in excluded_framework_list:
1332 # Write out graph as protobuf to disk
1333 tf_model_filename = "model.pb"
1334 tf.io.write_graph(
1335 concrete_function.graph, test_dir, tf_model_filename, True
1336 )
1337
1338            # 2.b Save out .tflite if framework includes tflite
1339 if "tflite" not in excluded_framework_list:
1340 # Convert the model to TFLite flatbuffer
1341 module = tf.Module()
1342
1343 if op_name == "callonce" or op_name == "lstm_stateful":
1344 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1345 [concrete_function], fcn_node
1346 )
1347 else:
1348 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1349 [concrete_function], module
1350 )
1351
1352 converter.experimental_new_converter = True
1353
1354                # Even if it's a non-quantized int32 test, this needs to be set to tf.float32
1355 converter.inference_input_type = tf.float32
1356 converter.inference_output_type = tf.float32
1357 tflite_model = converter.convert()
1358
1359 # Write out converted model to disk
1360 tflite_model_filename = "model.tflite"
1361 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1362 f.write(tflite_model)
1363
1364 # Get TF reference result if .pb is specified
1365 if tf_model_filename:
1366 tf_result_npy_filename = "tf_result.npy"
1367 tf_result = concrete_function(*placeholder_vals)
1368 np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)
1369
1370 tf_result_name = result_name
1371
1372 # Get TFLite inference result if .tflite is specified
1373 if tflite_model_filename:
1374 tflite_result_npy_filename = "tflite_result.npy"
1375
1376            ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]
1377
1378 if args.tflite_kernel_mode == "optimized" or (
1379 op_name in ops_with_optimized_only_kernel
1380 ):
1381 interpreter = tf.lite.Interpreter(
1382 model_path=os.path.join(test_dir, tflite_model_filename)
1383 )
1384 elif args.tflite_kernel_mode == "reference":
1385 interpreter = tf.lite.Interpreter(
1386 model_path=os.path.join(test_dir, tflite_model_filename),
1387 experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
1388 )
1389 else:
1390 assert 0, "unknown tflite interpreter mode {}".format(
1391 args.tflite_kernel_mode
1392 )
1393
1394 input_details = interpreter.get_input_details()
1395 output_details = interpreter.get_output_details()
1396
1397            # Prototype dynamic_shape testing
1398 # Need to resize the input tensors to known shapes when evaluating
1399 for idx, val in enumerate(placeholder_vals):
1400 interpreter.resize_tensor_input(
1401 input_details[idx]["index"], placeholder_shapes[idx]
1402 )
1403 interpreter.allocate_tensors()
1404
1405            assert len(input_details) == len(
1406 placeholder_vals
1407 ), "number of placeholder mismatch"
1408
1409 for idx, val in enumerate(placeholder_vals):
1410 interpreter.set_tensor(input_details[idx]["index"], val.numpy())
1411
1412 interpreter.invoke()
1413 tflite_result = interpreter.get_tensor(output_details[0]["index"])
1414
1415 np.save(
1416 os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
1417 )
1418
1419 # Result tensor name would change after converting to TFLite flatbuffer
1420 # Overwrite the information from TFLite models directly.
1421 # Assume single result tensor now
1422 tflite_result_name = output_details[0]["name"]
1423
1424        _, test_name = os.path.split(test_dir)
1425
1426        # Write out test descriptor
1427 write_test_json(
1428 filename=os.path.join(test_dir, "test.json"),
1429 tf_model_filename=tf_model_filename,
1430 tf_result_npy_filename=tf_result_npy_filename,
1431 tf_result_name=tf_result_name,
1432 tflite_model_filename=tflite_model_filename,
1433 tflite_result_npy_filename=tflite_result_npy_filename,
1434 tflite_result_name=tflite_result_name,
1435 ifm_name=placeholder_names,
1436 ifm_file=placeholder_npy_filenames,
1437 ifm_shape=placeholder_shapes,
1438            ifm_dynamic=placeholder_dynamic,
1439            framework_exclusions=excluded_framework_list,
1440 quantized=is_quantized,
1441            test_name=test_name,
1442        )
1443 except Exception as e:
1444 msg = "Error running task: {}".format(e)
1445 print(msg)
1446 print(
1447 "".join(
1448 traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
1449 )
1450 )
1451 return False
1452 return True
1453
1454
1455def build_const_net(
1456 args,
1457 curr_shape,
1458 op_name,
1459 dtype,
1460 excluded_framework_list,
1461 quantized_inference_dtype,
1462 result_name,
1463 seed,
1464 rng,
1465 filter,
1466 unit_test_args,
1467):
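    """Expand one op/shape/dtype combination into concrete unit-test argument
    lists (one per generated argument set) and append them to unit_test_args."""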
1468
1469 if quantized_inference_dtype:
1470 quant_dtype = get_tf_dtype(quantized_inference_dtype)
1471 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
1472 else:
1473 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
1474 test_dir = os.path.join(args.output_dir, test_dir)
1475
1476 # If the operator has an additional function to generate arguments, call it
1477 # here and iterate through the argument list that it generates
1478 op = TF_OP_LIST[op_name]
1479 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1480
1481    try:
1482 rank_lo, rank_hi = op["rank"]
1483 except KeyError:
1484        # Default the testing rank range to (1, 4).
1485 rank_lo = 1
1486 rank_hi = 4
1487
1488 if len(curr_shape) not in range(rank_lo, rank_hi + 1):
1489 return
1490
1491    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
1492 for desc, addl_args in addl_args_tuple:
1493        # Only filter on the full test_name, not the output directory
1494 _, test_name = os.path.split(test_dir + desc)
1495 if not filter or filter.search(test_name):
1496            unit_test_args.append(
1497 [
1498 op_name,
1499 args,
1500 test_dir + desc,
1501 curr_shape,
1502 addl_args,
1503 dtype,
1504 excluded_framework_list,
1505 quantized_inference_dtype,
1506 result_name,
1507 seed,
1508 ]
1509 )
1510
1511
1512# Python's built-in hash() is not reproducible across runs, so create our own deterministic hash
1513def op_name_hash(op_name):
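    """Deterministic CRC-style string hash, used to derive a stable per-op RNG seed."""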
1514 result = 0xDEADBEEF
1515 for ch in op_name:
1516 if result & 1:
1517 result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
1518 else:
1519 result = (ord(ch) << 24) ^ (result >> 1)
1520
1521 return result
1522
1523
1524def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):
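    """Build the full list of unit-test argument sets for one operator, covering
    every shape and both non-quantized and quantized dtypes."""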
1525
1526 if not args.quiet:
1527 print(
1528 "Generating tests for {} ".format(
1529 op_name
1530 )
1531 )
1532
1533 op = TF_OP_LIST[op_name]
1534
1535 # Seed the RNG so that we get the same random tests for each test each time
1536 # If the number of tests for a given generation function changes, the tests
1537 # for that operator may also change accordingly, but this will at least keep
1538 # down churn across operators.
1539
1540 bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
1541 np.int32
1542 ).max
1543 rng = np.random.default_rng(bounded_hash_val)
1544
1545    # op['types'] is either a dictionary with 'tf' and 'tflite' as keys
1546    # and the data types to test under each framework as values, or a plain list
1547
1548 if isinstance(op["types"], dict):
1549 try:
1550 tf_dtypes = op["types"]["tf"]
1551 except KeyError:
1552 tf_dtypes = []
1553 try:
1554 tflite_dtypes = op["types"]["tflite"]
1555 except KeyError:
1556 tflite_dtypes = []
1557 elif isinstance(op["types"], list):
1558 tf_dtypes = op["types"]
1559 tflite_dtypes = op["types"]
1560
1561 tf_nonquantized_dtypes = tf_dtypes # tf doesn't support quantized data types
1562 tflite_quantized_dtypes = []
1563 tflite_nonquantized_dtypes = []
1564 for dtype in tflite_dtypes:
1565 if isinstance(dtype, QuantType):
1566 tflite_quantized_dtypes.append(dtype)
1567 else:
1568 tflite_nonquantized_dtypes.append(dtype)
1569
1570 nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
1571 set(tflite_nonquantized_dtypes)
1572 )
1573 nonquantized_dtypes = list(nonquantized_dtypes_set)
1574 quantized_dtypes = tflite_quantized_dtypes
1575
1576    # append custom_shapes or replace shape_list with custom_shapes
1577 try:
1578 custom_shapes = op["custom_shapes"]
1579 if custom_shapes["custom_shape_only"]:
1580 shape_list = custom_shapes["shape_list"]
1581 else:
1582            shape_list = shape_list.copy()
1583            shape_list.extend(custom_shapes["shape_list"])
1584    except KeyError:
1585 pass
1586
1587    # populate non quantized unit test arguments
1588 for dtype in nonquantized_dtypes:
1589
1590 excluded_framework_set = set(ALL_FRAMEWORKS)
1591 if dtype in tf_nonquantized_dtypes:
1592 excluded_framework_set.remove("tf")
1593 if dtype in tflite_nonquantized_dtypes:
1594 excluded_framework_set.remove("tflite")
1595 excluded_framework_list = list(excluded_framework_set)
1596
1597 for curr_shape in shape_list:
1598 build_const_net(
1599 args,
1600 curr_shape,
1601 op_name,
1602 dtype,
1603 excluded_framework_list,
1604 None,
1605 result_name,
1606 bounded_hash_val,
1607 rng,
1608 filter,
1609 unit_test_args,
1610 )
1611
1612 # populate quantized unit test arguments
1613 # must exclude 'tf' and source dtype being tf.float32
1614 for dtype in quantized_dtypes:
1615 for curr_shape in shape_list:
1616 build_const_net(
1617 args,
1618 curr_shape,
1619 op_name,
1620 tf.float32,
1621 ["tf"],
1622 dtype,
1623 result_name,
1624 bounded_hash_val,
1625 rng,
1626 filter,
1627 unit_test_args,
1628 )
1629
1630 return unit_test_args
1631
1632
1633def createDynamicOpLists():
1634 """The templated operators are conv2d-style operators with a number of kernel
1635 sizes. Since the operator is unchanged, we generate the range of kernel
1636 sizes here in this loop and remove the original templates from the list.
1637
1638 This could be expanded to non-conv2d-style operators in the future."""
1639
1640 # Dynamically create op lists for convolutions with a list of kernel sizes
1641 KERNELS = [
1642 [1, 1],
1643 [3, 3],
1644 [5, 5],
1645 ]
1646
1647    # dim = [D, H, W]
1648 KERNELS_3D = [
1649 [1, 1, 1],
1650 [2, 3, 3],
1651 [3, 5, 5],
1652 ]
1653
1654    TEMPLATE_LIST = [
1655 "conv2d",
1656 "conv2d_bias",
1657 "conv2d_relu",
1658 "conv2d_relu6",
1659 "conv2d_relu_n1_to_1",
1660 "conv2d_tanh",
1661 "depthwise_conv2d",
1662 "depthwise_conv2d_bias",
1663 "transpose_conv2d",
1664 ]
1665
1666    TEMPLATE_LIST_CONV3D = [
1667 "conv3d",
1668 "conv3d_bias",
1669 ]
1670
1671    for t in TEMPLATE_LIST:
1672 for k in KERNELS:
1673 testName = "{}_{}x{}".format(t, k[0], k[1])
1674 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1675 TF_OP_LIST[testName]["filter"] = k
1676 TF_OP_LIST[testName]["template"] = False
1677
1678    # The 2D kernel list above doesn't apply to 3D operators, so Conv3D templates are expanded with their own kernel sizes.
1679 for t in TEMPLATE_LIST_CONV3D:
1680 for k in KERNELS_3D:
1681 testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
1682 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1683 TF_OP_LIST[testName]["filter"] = k
1684 TF_OP_LIST[testName]["template"] = False
1685
1686    # Delete any templates after having created any dynamic ops
1687 # This is a two-pass operation because it's bad practice to delete
1688 # keys from dictionaries while iterating
1689 keyList = []
1690 for k in TF_OP_LIST:
1691 try:
1692 if TF_OP_LIST[k]["template"]:
1693 keyList.append(k)
1694 continue
1695 except KeyError:
1696 pass
1697
1698 for k in keyList:
1699 del TF_OP_LIST[k]
1700
1701
1702def main():
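    """Parse command-line arguments, expand the operator list, then generate and run all unit tests."""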
1703 parser = argparse.ArgumentParser()
1704 parser.add_argument(
1705 "--seed", dest="random_seed", default=42, type=int, help="Random seed"
1706 )
1707 parser.add_argument(
1708 "--random-shapes",
1709 dest="random_shapes",
1710 default=0,
1711 type=int,
1712 help=(
1713            "Use N random shapes of each rank for generating tests, "
1714 "seeded with random seed"
1715 ),
1716 )
1717 parser.add_argument(
1718 "-o",
1719 "--output-dir",
1720 dest="output_dir",
1721 default=".",
1722 type=str,
1723 help="Test output directory path prefix",
1724 )
1725 parser.add_argument(
1726 "-q",
1727 "--quiet",
1728 dest="quiet",
1729 default=False,
1730 action="store_true",
1731 help="Do not print test names",
1732 )
1733 parser.add_argument(
1734 "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
1735 )
1736 parser.add_argument(
1737 "-m",
1738 "--tflite-kernel-mode",
1739 dest="tflite_kernel_mode",
1740 type=str,
1741 choices=["reference", "optimized"],
1742 default="reference",
1743 help="TFLite interpreter kernel mode",
1744 )
1745 parser.add_argument(
1746 "--num-samples",
1747 dest="num_samples",
1748 default=200,
1749 type=int,
1750 help="Number of input samples for post-training quantization",
1751 )
1752 parser.add_argument(
1753 "--filter",
1754 dest="filter",
1755 default="",
1756 type=str,
1757 help="Filter test names by this expression",
1758 )
1759 args = parser.parse_args()
1760
1761 # Turn the filter into a re object if present
1762 filter = None
1763 if args.filter != "":
1764 filter = re.compile(args.filter)
1765
1766 # Autodetect CPU count
1767 if args.jobs <= 0:
1768 args.jobs = os.cpu_count()
1769
1770 # Disable TF info messages
1771 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
1772
1773 try:
1774 os.makedirs(args.output_dir)
1775 except FileExistsError:
1776 pass
1777
1778 if args.random_shapes:
1779 gen_rand_shapes(args)
1780
1781 # Build dynamic ops
1782 createDynamicOpLists()
1783
1784 # Generate the test list and arguments to run_unit_test()
1785 unit_test_args = []
1786
1787 for op in TF_OP_LIST:
1788 generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)
1789
1790 errors = 0
1791 for t in unit_test_args:
1792 if not run_unit_test(*t):
1793 errors = errors + 1
1794
1795 if not args.quiet:
1796 print("\nAll tasks done - with {} errors".format(errors))
1797
1798 return 1 if errors else 0
1799
1800
1801if __name__ == "__main__":
1802 exit(main())