#!/usr/bin/env python3
# Copyright (c) 2020-2024, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# ------|------------------|------------------------------------
# 0     | DEBUG            | [Default] Print all messages
# 1     | INFO             | Filter out INFO messages
# 2     | WARNING          | Filter out INFO & WARNING messages
# 3     | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages, except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402

from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types': list of TensorFlow types that should be tested for this op
#       OR
#       a dictionary of {'framework_name': [type_list]} for cases where only
#       a subset of the types should be tested in each framework.  This can also
#       be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#       processing in createDynamicOpLists)
#   'bias': boolean indicating that there is a bias component to be generated
#   'qtypes': list of QuantType quantized types to generate for this op
#   'rank': tuple (lowest rank, highest rank). Dimension range of the input tensor.
#   'custom_shapes': list of custom shapes for specific operators
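#
# An illustrative entry showing the schema (hypothetical; "example_op" and
# TBuilder.Example are placeholders, not ops defined in this file):
#
#   "example_op": {
#       "operands": (1, 0),  # 1 placeholder tensor, 0 constant tensors
#       "build_fcn": (TBuilder.Example, TGen.tgBasic, ArgGen.agNone),
#       "types": {"tflite": TYPE_F},  # restrict to the TFLite framework
#       "rank": (1, 4),  # optional: only generate inputs of rank 1 to 4
#   }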

TF_OP_LIST = {
    "add": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sub": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
        },
    },
    "mul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "exp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rcp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "relu1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu0To1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu0To1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu6": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "leaky_relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "prelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "gelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            # Need compiler support for tf.Erf.
            # "tf": TYPE_F,
            "tflite": list(
                # Only float32, int8 and uint8 supported currently
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
            ),
        },
    },
    "concat": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
        "rank": (0, 4),
        "custom_shapes": {
            "custom_shape_only": False,
            "shape_list": [()],
        },
    },
    "bitwise_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_xor": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "logical_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "reduce_any": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_all": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_min": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_max": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_sum": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            # v2 converter doesn't recognize quantized reduce_sum
            # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            "tflite": TYPE_F,
        },
    },
    "reduce_mean": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "reduce_product": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_F,
    },
    "min": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "max": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "pow": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
        # Technically, integer is supported, but only for positive exponents.
        # Needs a random argument generator.
        "types": TYPE_F,
    },
    "abs": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "ceil": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "floor": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "log": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "negate": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rsqrt": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rsqrt, TGen.tgBasicPositive, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_F + [QuantType.ALL_I8]),
        },
    },
    "sign": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
        },
    },
    "sigmoid": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "tanh": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "erf": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Erf, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
        },
    },
    "sin": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "cos": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "atan2": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": TYPE_F,
        },
    },
    "square": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "squared_difference": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_FI + [QuantType.ALL_I8]),
        },
    },
    "equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu6_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_n1_to_1_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    # This test is converted as:
    #   tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
467 "conv2d_tanh_TEMPLATE": {
468 "operands": (1, 2),
469 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
470 "types": {
471 "tf": [tf.float32],
472 "tflite": [
473 tf.float32,
474 QuantType.CONV_U8_U8,
475 QuantType.CONV_I8_I8,
476 QuantType.CONV_I16_I8,
477 ],
478 },
479 "template": True,
480 },
481 "conv2d_bias_TEMPLATE": {
482 "operands": (1, 2),
483 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
484 "types": {
485 "tf": [tf.float32],
486 "tflite": [
487 tf.float32,
488 QuantType.CONV_U8_U8,
489 QuantType.CONV_I8_I8,
490 QuantType.CONV_I16_I8,
491 ],
492 },
493 "bias": True,
494 "template": True,
495 },
TatWai Chongfd629052022-07-25 04:01:58 +0000496 "conv3d_TEMPLATE": {
497 "operands": (1, 1),
498 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
499 "types": {
500 "tf": [tf.float32],
501 "tflite": [
502 tf.float32,
503 QuantType.CONV_U8_U8,
504 QuantType.CONV_I8_I8,
505 # Quantization to 16x8-bit not yet supported by tflite.
506 ],
507 },
508 "template": True,
509 "rank": (1, 5),
510 },
511 "conv3d_bias_TEMPLATE": {
512 "operands": (1, 2),
513 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
514 "types": {
515 "tf": [tf.float32],
516 "tflite": [
517 tf.float32,
518 QuantType.CONV_U8_U8,
519 QuantType.CONV_I8_I8,
520 # Quantization to 16x8-bit not yet supported by tflite.
521 ],
522 },
523 "bias": True,
524 "template": True,
525 "rank": (1, 5),
526 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000527 "depthwise_conv2d_TEMPLATE": {
528 "operands": (1, 1),
529 "build_fcn": (
530 TBuilder.DepthwiseConv2d,
531 TGen.tgDepthwiseConv2d,
532 ArgGen.agDepthwiseConv2d,
533 ),
534 "types": {
535 "tf": [tf.float32],
536 "tflite": [
537 tf.float32,
538 QuantType.CONV_U8_U8,
539 QuantType.CONV_I8_I8,
540 QuantType.CONV_I16_I8,
541 ],
542 },
543 "template": True,
544 },
545 "depthwise_conv2d_bias_TEMPLATE": {
546 "operands": (1, 2),
547 "build_fcn": (
548 TBuilder.DepthwiseConv2dWithBias,
549 TGen.tgDepthwiseConv2d,
550 ArgGen.agDepthwiseConv2d,
551 ),
552 "types": {
553 "tf": [tf.float32],
554 "tflite": [
555 tf.float32,
556 QuantType.CONV_U8_U8,
557 QuantType.CONV_I8_I8,
558 QuantType.CONV_I16_I8,
559 ],
560 },
561 "bias": True,
562 "template": True,
563 },
564 "transpose_conv2d_TEMPLATE": {
565 "operands": (1, 1),
566 "build_fcn": (
567 TBuilder.TransposeConv2d,
568 TGen.tgTransposeConv2d,
569 ArgGen.agTransposeConv2d,
570 ),
571 "types": {
572 "tf": [tf.float32],
573 "tflite": [
574 tf.float32,
575 QuantType.CONV_U8_U8,
576 QuantType.CONV_I8_I8,
577 QuantType.CONV_I16_I8,
578 ],
579 },
580 "template": True,
581 },
582 "argmax": {
583 "operands": (1, 0),
584 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
585 "types": {"tf": TYPE_F},
586 },
587 "avg_pool2d": {
588 "operands": (1, 0),
589 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
590 "types": {
591 "tf": TYPE_F,
592 "tflite": list(
593 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
594 ),
595 },
596 },
597 "max_pool2d": {
598 "operands": (1, 0),
599 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
600 "types": {
601 "tf": TYPE_F,
602 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # ALL_I16 not supported yet
            # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
            # QI16 is missing from MaxPoolOperandAndResultConstraints.
            # If QI16 is added back, this test can run through.
        },
    },
    "reshape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
        "types": TYPE_FI,
    },
    "transpose": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
        "types": TYPE_FI,
    },
    "slice": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
        "types": TYPE_FI,
    },
    "strided_slice": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
        "types": TYPE_FI,
    },
    "select": {
        "operands": (3, 0),
        "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "addn": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "concatv2": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
        "rank": (0, 4),
        "custom_shapes": {
            "custom_shape_only": False,
            "shape_list": [()],
        },
    },
    "stack": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
        "types": TYPE_FI,
    },
    "unstack": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
        "types": TYPE_F,
    },
    "mirrorpad": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
        "types": TYPE_FI,
    },
    "pad": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "expand_dims": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
        "types": TYPE_FI,
    },
    "shape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "rank": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "fill": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
        "types": TYPE_FI,
    },
    "elu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "log_softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "dynamic_linear": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.DynamicLinear, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F),
        },
        "custom_shapes": {
            "custom_shape_only": True,
            "shape_list": [(14, 19)],
        },
        # One tuple per input operand, specifying which dim to set to None.
        # In this case, we have 1 input, so we have 1 tuple.
        # We're setting the first input's first dim to None.
726 "dynamic_shape_dim": [
727 (0,),
728 ],
729 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000730 "matmul": {
731 "operands": (2, 0),
732 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
733 "types": {
734 "tf": TYPE_F,
735 "tflite": list(
736 TYPE_F
737 + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
            ),
        },
    },
    "add_scalar": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "add_1d": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "split": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
        "types": TYPE_FI,
    },
    "tile": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
        "types": TYPE_FI,
    },
    "reverse": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
        "types": {"tf": TYPE_FI},
    },
    "gather": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
        "types": TYPE_FI,
    },
    "gather_nd": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
        "types": TYPE_FI,
    },
    "scatter_nd": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
        "types": TYPE_FI,
    },
    "space_to_batch": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
        "types": TYPE_F,
    },
    "batch_to_space": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
        "types": TYPE_F,
    },
    "dynamic_batch_to_space": {
        "operands": (1, 0),
        "build_fcn": (
            TBuilder.DynamicBatchToSpace,
            TGen.tgBasic,
            ArgGen.agBatchToSpace,
        ),
        "types": TYPE_F,
        "custom_shapes": {
            "custom_shape_only": True,
            "shape_list": [(8, 4, 4, 4)],
        },
        # One tuple per input operand, specifying which dim to set to None.
        # In this case, we have 1 input, so we have 1 tuple.
        # We're setting the first input's 0th dim to None.
807 "dynamic_shape_dim": [
808 (0,),
809 ],
810 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000811 "space_to_depth": {
812 "operands": (1, 0),
813 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
814 "types": TYPE_F,
815 },
Jerry Ge28811d92023-12-05 00:53:26 +0000816 "dynamic_space_to_depth": {
817 "operands": (1, 0),
818 "build_fcn": (TBuilder.DynamicSpaceToDepth, TGen.tgBasic, ArgGen.agNone),
819 "types": {
820 "tf": [],
821 "tflite": list(TYPE_F),
822 },
823 "custom_shapes": {
824 "custom_shape_only": True,
825 "shape_list": [(1, 32, 32, 8)],
826 },
        # One tuple per input operand, specifying which dim to set to None.
        # In this case, we have 1 input, so we have 1 tuple.
        # We're setting the first input's third dim to None.
830 "dynamic_shape_dim": [
831 (3,),
832 ],
833 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000834 "depth_to_space": {
835 "operands": (1, 0),
836 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
837 "types": TYPE_F,
838 },
Jerry Ge28811d92023-12-05 00:53:26 +0000839 "dynamic_depth_to_space": {
840 "operands": (1, 0),
841 "build_fcn": (TBuilder.DynamicDepthToSpace, TGen.tgBasic, ArgGen.agNone),
842 "types": {
843 "tf": [],
844 "tflite": list(TYPE_F),
845 },
846 "custom_shapes": {
847 "custom_shape_only": True,
848 "shape_list": [(1, 1, 1, 4)],
849 },
        # One tuple per input operand, specifying which dim to set to None.
        # In this case, we have 1 input, so we have 1 tuple.
        # We're setting the first input's third dim to None.
853 "dynamic_shape_dim": [
854 (3,),
855 ],
856 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000857 "one_hot": {
858 "operands": (3, 1),
859 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
860 "types": TYPE_FI,
861 },
862 "fakequant": {
863 "operands": (1, 0),
864 "build_fcn": (
865 TBuilder.Fakequant,
866 TGen.tgBasic,
867 ArgGen.agFakequant,
868 ),
869 "types": {"tf": TYPE_F},
870 },
TatWai Chong0cef07e2023-02-27 13:22:52 -0800871 "resize": {
Jeremy Johnson015c3552022-02-23 12:15:03 +0000872 "operands": (1, 0),
TatWai Chong0cef07e2023-02-27 13:22:52 -0800873 "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
TatWai Chongf7326092022-06-08 12:17:14 -0700874 "types": {
875 "tf": TYPE_F,
876 "tflite": list(
877 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
878 ),
879 },
Jerry Ge5dd5a552023-05-23 22:41:20 +0000880 "custom_shapes": {
881 "custom_shape_only": False,
882 "shape_list": [(3, 1, 1, 7)],
883 },
TatWai Chongf7326092022-06-08 12:17:14 -0700884 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000885 "left_shift": {
886 "operands": (1, 0),
887 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
888 "types": {"tf": [tf.int32]},
889 },
890 "right_shift": {
891 "operands": (1, 0),
892 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
893 "types": {
894 "tf": [
895 tf.int32,
896 ]
897 },
898 },
Jerry Ge9e94af82022-10-27 09:57:00 -0700899 "while": {
900 "operands": (1, 0),
901 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
902 "types": {
903 "tflite": list(TYPE_F),
904 },
905 },
906 "lstm": {
907 "operands": (1, 0),
908 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
909 "types": {
910 "tflite": [
911 tf.float32,
912 # tf.int32
913 ]
914 },
915 },
Tai Lycf84bc92023-09-07 20:49:09 +0000916 "lstm_stateful": {
917 "operands": (1, 0),
918 "build_fcn": (TBuilder.SLSTM, TGen.tgRecurrent, ArgGen.agNone),
919 "types": {
920 "tflite": [
921 tf.float32,
922 ]
923 },
924 },
Jerry Ge9e94af82022-10-27 09:57:00 -0700925 "gru": {
926 "operands": (1, 0),
927 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
928 "types": {
929 "tflite": [
930 tf.float32,
931 # tf.int32
932 ]
933 },
934 },
935 "rnn": {
936 "operands": (1, 0),
937 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
938 "types": {
939 "tflite": [
940 tf.float32,
941 ]
942 },
943 },
Tai Lycf84bc92023-09-07 20:49:09 +0000944 "callonce": {
945 "operands": (1, 0),
946 "build_fcn": (TBuilder.CallOnce, TGen.tgBasic, ArgGen.agNone),
947 "types": {
948 "tflite": [tf.float32],
949 },
950 "custom_shapes": {
951 "custom_shape_only": True,
952 "shape_list": [(1,)],
953 },
954 },
Luke Hutton261b7b62023-01-10 14:50:31 +0000955 "rfft2d": {
956 "operands": (1, 0),
957 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
958 "types": {
959 "tflite": TYPE_F,
960 },
961 },
Luke Hutton714aa602023-02-08 19:45:26 +0000962 "real": {
963 "operands": (1, 0),
964 "build_fcn": (TBuilder.Real, TGen.tgComplexComponents, ArgGen.agNone),
965 "types": {
966 "tflite": [tf.complex64],
967 },
968 },
969 "imag": {
970 "operands": (1, 0),
971 "build_fcn": (TBuilder.Imag, TGen.tgComplexComponents, ArgGen.agNone),
972 "types": {
973 "tflite": [tf.complex64],
974 },
975 },
Tai Lyfe36fa92023-06-01 21:45:12 +0000976 "broadcastto": {
977 "operands": (1, 1),
978 "build_fcn": (TBuilder.BroadcastTo, TGen.tgBroadcastTo, ArgGen.agNone),
979 "types": {
980 "tf": TYPE_FIB,
981 },
982 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000983}
984
985# Shapes to be tested; default can be overwritten
986shape_list = [
987 (1,),
988 (64,),
989 (14, 19),
990 (13, 21, 3),
Luke Hutton261b7b62023-01-10 14:50:31 +0000991 (1, 8, 16),
Jeremy Johnson015c3552022-02-23 12:15:03 +0000992 (1, 4, 4, 4),
993 (1, 8, 4, 17),
994 (1, 4, 8, 19),
995 (1, 32, 32, 8),
996 (1, 7, 7, 9),
TatWai Chong0cef07e2023-02-27 13:22:52 -0800997 (3, 1, 1, 7),
TatWai Chongfd629052022-07-25 04:01:58 +0000998 (2, 2, 7, 7, 2),
999 (1, 4, 8, 21, 17),
1000 (3, 32, 16, 16, 5),
Jeremy Johnson015c3552022-02-23 12:15:03 +00001001]
1002
1003
1004def gen_rand_shapes(args):
1005 """Overwrite the global shape list with a new list of random shapes"""
1006 global shape_list
1007
1008 rng = np.random.default_rng(args.random_seed)
1009
1010 # Don't let things get too big... cap the maximum volume, but let
1011 # an individual dimension be 1..47
1012 max_total_volume = 32 * 32 * 4
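    # e.g. a candidate shape (40, 40, 40) has volume 64000 > 4096, so the
    # loop below halves the offending dimension until it fits: (40, 40, 2)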

    shape_list = []
    # Only iterate over ranks 2, 3, 4, and 5
    for rank in range(2, 6):
        for n in range(args.random_shapes):
            new_shape = rng.integers(1, 48, size=rank)

            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1

            # Limit the total shape volume and throw out any
            # shapes that wouldn't leave at least size=2 in some non-batch dimension
            volume = 1
            skip_shape = False
            for i in range(rank):

                volume *= new_shape[i]

                # Reduce the shape, while it's larger than the maximum volume
                while volume > max_total_volume:
                    new_shape[i] = new_shape[i] // 2
                    volume = volume // 2

                    # Now an untenable dimension size? Skip this one.
                    if new_shape[i] < 1:
                        skip_shape = True

            if not skip_shape:
                shape_list.append(tuple(new_shape))


# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to .tflite if it's a quantized unit test
def run_unit_test(
    op_name,
    args,
    test_dir,
    curr_shape,
    addl_args,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
):

    try:
        op = TF_OP_LIST[op_name]
        op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

        # Get and seed a random number generator for this test
        rng = np.random.default_rng(seed)

        # return placeholders=(str: name, np.array: value)
        # consts=(str: name, np.array: value)
        placeholders, consts = (
            tensor_gen_fcn(op, curr_shape, dtype, rng, False)
            if tensor_gen_fcn.__name__ == "tgBFuzz"
            else tensor_gen_fcn(op, curr_shape, dtype, rng)
        )

        # If the test doesn't have any placeholders/consts, terminate early
        if len(placeholders) == 0 and len(consts) == 0:
            return True

        if not args.quiet:
            print(" {} ".format(test_dir))

        try:
            os.mkdir(test_dir)
        except FileExistsError:
            pass

        const_nodes = [value for name, value in consts]

        num_placeholders = len(placeholders)
        # if test is quantized, create tensor quantization metadata info for
        # each input tensor, based on different quantized type
        if quantized_inference_dtype:
            is_quantized = True
            # TODO: support INT8 IFM x INT4 weight later
            if quantized_inference_dtype == QuantType.ALL_U8:
                qzero = [128] * num_placeholders
                numpy_dtype = [np.uint8] * num_placeholders
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.ALL_I8:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int8] * num_placeholders
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.ALL_I16:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int16] * num_placeholders
                tflite_inference_dtype = tf.int16
            elif quantized_inference_dtype == QuantType.CONV_U8_U8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [128] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.uint8, np.uint8]
                else:
                    numpy_dtype = [np.uint8, np.uint8, np.int32]
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.CONV_I8_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [0] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.int8, np.int8]
                else:
                    numpy_dtype = [np.int8, np.int8, np.int32]
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.CONV_I16_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                if num_placeholders == 2:
                    qzero = [0, 0]
                    numpy_dtype = [np.int16, np.int8]
                else:
                    qzero = [0, 0, 0]
                    numpy_dtype = [
                        np.int16,
                        np.int8,
                        np.int64,
                    ]  # np.int64 to represent the 40-bit accumulator
                tflite_inference_dtype = tf.int16
            else:
                raise Exception(
                    "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
                )

        else:
            is_quantized = False

        tf_model_filename = None
        tf_result_npy_filename = None
        tf_result_name = None

        tflite_model_filename = None
        tflite_result_npy_filename = None
        tflite_result_name = None

        placeholder_names = []
        placeholder_vals = []
        placeholder_signatures = ()
        placeholder_npy_filenames = []
        placeholder_shapes = []

        for idx, (name, val) in enumerate(placeholders):
            input_shape = tuple(val.shape)

            try:
                dynamic_shape_dim_tuples = op["dynamic_shape_dim"]
                dim_tuple = dynamic_shape_dim_tuples[idx]
                dim = dim_tuple[0]
                input_shape = list(input_shape)
                input_shape[dim] = None

                addl_args.append(tuple(input_shape))
            except KeyError:
                pass
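            # e.g. with "dynamic_shape_dim": [(0,)], a (14, 19) input gets a
            # TensorSpec shape of (None, 19) in the signature built below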

            placeholder_names.append(name)
            placeholder_signatures = placeholder_signatures + (
                tf.TensorSpec(shape=input_shape, dtype=val.dtype, name=name),
            )
            placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
            placeholder_shapes.append(val.shape)

        # Get test builder class
        fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
        concrete_function = tf.function(input_signature=placeholder_signatures)(
            fcn_node.eval
        ).get_concrete_function()

        if is_quantized:

            assert dtype is tf.float32, "quantized test must come from float32 graph"

            # 1. Quantize the float placeholder npy to feed the graph
            for idx, (name, val) in enumerate(placeholders):

                # We use np.amin()/np.amax() to determine the dynamic range
                # for the quantized test
                zeropoint = 0
                scale = 1.0
                if numpy_dtype[idx] != np.int64:
                    qmin = np.iinfo(numpy_dtype[idx]).min
                    qmax = np.iinfo(numpy_dtype[idx]).max
                    num_bits = np.iinfo(numpy_dtype[idx]).bits
                # 40 bit is represented as np.int64
                else:
                    num_bits = 40
                    qmin = -(1 << num_bits)
                    qmax = (1 << num_bits) - 1

                min_val = np.amin(val)
                max_val = np.amax(val)

                # for a single-value tensor, we set scale equal to the abs(value),
                # and fix zeropoint to 128
                # if val > 0, it'll be represented as 129,
                # where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                # where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
                # and let quantized 1 represent the value
                # also adjust effective min/max consequently
                if max_val == min_val:
                    if max_val != 0:
                        scale = abs(max_val)
                    else:
                        scale = 1.0
                    min_val = float(qmin - qzero[idx]) * scale
                    max_val = float(qmax - qzero[idx]) * scale
                else:
                    scale = (max_val - min_val) / float(qmax - qmin)
                    if op_name == "squared_difference":
                        zeropoint = -int(round((-min_val) / scale)) + qmin
                    else:
                        zeropoint = int(round((-min_val) / scale)) + qmin

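                # Worked example (assuming int8): val in [-1.0, 1.0] gives
                # scale = 2.0 / 255 ~= 0.00784 and
                # zeropoint = round(1.0 / 0.00784) + (-128) = 0, so a value
                # of 0.5 quantizes to round(0.5 / 0.00784) + 0 = 64
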
                # Run through tf.fakequant first to ensure the quantization
                # error is aligned
                fakequant_val = tf.quantization.fake_quant_with_min_max_args(
                    val,
                    min=min_val,
                    max=max_val,
                    num_bits=num_bits,
                    name="gen_quant_npy",
                )

                quant_val = np.round(fakequant_val / scale) + zeropoint

                # In very few unit tests (after the TF hash of May 2020), this
                # quantized value for some reason exceeds the [qmin, qmax] range
                saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])

                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]),
                    saved_val,
                    False,
                )

                placeholder_vals.append(tf.convert_to_tensor(saved_val))

            # 2. Convert the model to quantized TFLite flatbuffer
            module = tf.Module()
            converter = tf.lite.TFLiteConverter.from_concrete_functions(
                [concrete_function], module
            )
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.experimental_new_converter = True

            # use MLIR-based post-quantizer
            converter.experimental_new_quantizer = True

            flag = (
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8  # noqa: E501
            )
            if tflite_inference_dtype == tf.int16:
                converter.target_spec.supported_ops = [flag]

            # Generator function for the TFLiteConverter's integer quantization:
            # it yields a few hundred input samples with the same order, type, and
            # shape as the inputs, to calibrate/estimate the range of the
            # floating-point inputs.
            # For broadcast fuzzing tests, fuzzing needs to be disabled; otherwise
            # it causes a mismatch of input tensor shapes.
            def input_stats():
                for i in range(0, args.num_samples):
                    placeholders, _ = (
                        tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng, True)
                        if tensor_gen_fcn.__name__ == "tgBFuzz"
                        else tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng)
                    )
                    yield [s[1] for s in placeholders]

            converter.representative_dataset = input_stats
            converter.inference_input_type = tflite_inference_dtype
            converter.inference_output_type = tflite_inference_dtype

            tflite_model = converter.convert()

            tflite_model_filename = "model.tflite"

            # Write out converted model to disk
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        else:  # is_quantized is False

            # 1. Save out the numpy array directly
            for idx, (name, val) in enumerate(placeholders):
                placeholder_vals.append(tf.convert_to_tensor(val))

                # Complex tensors are expected to be represented by a
                # single floating-point tensor of shape [?, ..., ?, 2].
                if val.dtype == np.complex64:
                    val_shape = val.shape + (2,)
                    val = val.view(np.float32)
                    val = val.reshape(val_shape)
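                    # e.g. a complex64 tensor of shape (8, 4) is saved as a
                    # float32 tensor of shape (8, 4, 2)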

                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
                )

        # 2.a Save out .pb if the framework list includes tensorflow
1328 if "tf" not in excluded_framework_list:
1329 # Write out graph as protobuf to disk
1330 tf_model_filename = "model.pb"
1331 tf.io.write_graph(
1332 concrete_function.graph, test_dir, tf_model_filename, True
1333 )
1334
        # 2.b Save out .tflite if the framework list includes tflite
1336 if "tflite" not in excluded_framework_list:
1337 # Convert the model to TFLite flatbuffer
1338 module = tf.Module()
Tai Lycf84bc92023-09-07 20:49:09 +00001339
1340 if op_name == "callonce" or op_name == "lstm_stateful":
1341 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1342 [concrete_function], fcn_node
1343 )
1344 else:
1345 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1346 [concrete_function], module
1347 )
Jeremy Johnson015c3552022-02-23 12:15:03 +00001348
1349 converter.experimental_new_converter = True
1350
            # Even if it's a non-quantized int32 test, this needs to be set
            # to tf.float32
            converter.inference_input_type = tf.float32
            converter.inference_output_type = tf.float32
            tflite_model = converter.convert()

            # Write out converted model to disk
            tflite_model_filename = "model.tflite"
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        # Get TF reference result if .pb is specified
        if tf_model_filename:
            tf_result_npy_filename = "tf_result.npy"
            tf_result = concrete_function(*placeholder_vals)
            np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)

            tf_result_name = result_name

        # Get TFLite inference result if .tflite is specified
        if tflite_model_filename:
            tflite_result_npy_filename = "tflite_result.npy"

            ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]

            if args.tflite_kernel_mode == "optimized" or (
                op_name in ops_with_optimized_only_kernel
            ):
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename)
                )
            elif args.tflite_kernel_mode == "reference":
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename),
                    experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
                )
            else:
                assert 0, "unknown tflite interpreter mode {}".format(
                    args.tflite_kernel_mode
                )

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            # Prototype dynamic_shape testing
            # Need to resize the input tensors to known shapes when evaluating
            for idx, val in enumerate(placeholder_vals):
                interpreter.resize_tensor_input(
                    input_details[idx]["index"], placeholder_shapes[idx]
                )
            interpreter.allocate_tensors()

            assert len(input_details) == len(
                placeholder_vals
            ), "number of placeholders mismatch"

            for idx, val in enumerate(placeholder_vals):
                interpreter.set_tensor(input_details[idx]["index"], val.numpy())

            interpreter.invoke()
            tflite_result = interpreter.get_tensor(output_details[0]["index"])

            np.save(
                os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
            )

            # Result tensor name would change after converting to TFLite flatbuffer
            # Overwrite the information from TFLite models directly.
            # Assume single result tensor now
            tflite_result_name = output_details[0]["name"]

        _, test_name = os.path.split(test_dir)

        # Write out test descriptor
        write_test_json(
            filename=os.path.join(test_dir, "test.json"),
            tf_model_filename=tf_model_filename,
            tf_result_npy_filename=tf_result_npy_filename,
            tf_result_name=tf_result_name,
            tflite_model_filename=tflite_model_filename,
            tflite_result_npy_filename=tflite_result_npy_filename,
            tflite_result_name=tflite_result_name,
            ifm_name=placeholder_names,
            ifm_file=placeholder_npy_filenames,
            ifm_shape=placeholder_shapes,
            framework_exclusions=excluded_framework_list,
            quantized=is_quantized,
            test_name=test_name,
        )
    except Exception as e:
        msg = "Error running task: {}".format(e)
        print(msg)
        print(
            "".join(
                traceback.format_exception(type(e), e, e.__traceback__)
            )
        )
        return False
    return True


def build_const_net(
    args,
    curr_shape,
    op_name,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
    rng,
    filter,
    unit_test_args,
):

    if quantized_inference_dtype:
        quant_dtype = get_tf_dtype(quantized_inference_dtype)
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
    else:
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
    test_dir = os.path.join(args.output_dir, test_dir)

    # If the operator has an additional function to generate arguments, call it
    # here and iterate through the argument list that it generates
    op = TF_OP_LIST[op_name]
    op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Set the testing rank to (1, 4) by default.
        rank_lo = 1
        rank_hi = 4

    if len(curr_shape) not in range(rank_lo, rank_hi + 1):
        return

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
    for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
                [
                    op_name,
                    args,
                    test_dir + desc,
                    curr_shape,
                    addl_args,
                    dtype,
                    excluded_framework_list,
                    quantized_inference_dtype,
                    result_name,
                    seed,
                ]
            )


# Python's hash is not reproducible, so create a hash for our purpose
def op_name_hash(op_name):
    result = 0xDEADBEEF
    for ch in op_name:
        if result & 1:
            result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
        else:
            result = (ord(ch) << 24) ^ (result >> 1)

    return result

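# For example, op_name_hash("add") returns the same value in every process,
# whereas the built-in hash() is salted per interpreter run (PYTHONHASHSEED),
# which would reshuffle the generated tests between invocations.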

def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):

    if not args.quiet:
        print("Generating tests for {} ".format(op_name))

    op = TF_OP_LIST[op_name]

    # Seed the RNG so that we get the same random tests for each test each time
    # If the number of tests for a given generation function changes, the tests
    # for that operator may also change accordingly, but this will at least keep
    # down churn across operators.

    bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
        np.int32
    ).max
    rng = np.random.default_rng(bounded_hash_val)

    # This is a dictionary with 'tf' and 'tflite' as keys
    # and values being the data types we want to test under each framework

    if isinstance(op["types"], dict):
        try:
            tf_dtypes = op["types"]["tf"]
        except KeyError:
            tf_dtypes = []
        try:
            tflite_dtypes = op["types"]["tflite"]
        except KeyError:
            tflite_dtypes = []
    elif isinstance(op["types"], list):
        tf_dtypes = op["types"]
        tflite_dtypes = op["types"]

    tf_nonquantized_dtypes = tf_dtypes  # tf doesn't support quantized data types
    tflite_quantized_dtypes = []
    tflite_nonquantized_dtypes = []
    for dtype in tflite_dtypes:
        if isinstance(dtype, QuantType):
            tflite_quantized_dtypes.append(dtype)
        else:
            tflite_nonquantized_dtypes.append(dtype)

    nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
        set(tflite_nonquantized_dtypes)
    )
    nonquantized_dtypes = list(nonquantized_dtypes_set)
    quantized_dtypes = tflite_quantized_dtypes

    # append custom_shapes or replace shape_list with custom_shapes
    try:
        custom_shapes = op["custom_shapes"]
        if custom_shapes["custom_shape_only"]:
            shape_list = custom_shapes["shape_list"]
        else:
            shape_list = shape_list.copy()
            shape_list.extend(custom_shapes["shape_list"])
    except KeyError:
        pass

    # Populate non-quantized unit test arguments
    for dtype in nonquantized_dtypes:

        excluded_framework_set = set(ALL_FRAMEWORKS)
        if dtype in tf_nonquantized_dtypes:
            excluded_framework_set.remove("tf")
        if dtype in tflite_nonquantized_dtypes:
            excluded_framework_set.remove("tflite")
        excluded_framework_list = list(excluded_framework_set)

        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                dtype,
                excluded_framework_list,
                None,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

1609 # must exclude 'tf' and source dtype being tf.float32
1610 for dtype in quantized_dtypes:
1611 for curr_shape in shape_list:
1612 build_const_net(
1613 args,
1614 curr_shape,
1615 op_name,
1616 tf.float32,
1617 ["tf"],
1618 dtype,
1619 result_name,
1620 bounded_hash_val,
1621 rng,
1622 filter,
1623 unit_test_args,
1624 )
1625
1626 return unit_test_args
1627
1628
def createDynamicOpLists():
    """The templated operators are conv2d-style operators with a number of kernel
    sizes.  Since the operator is unchanged, we generate the range of kernel
    sizes here in this loop and remove the original templates from the list.

    This could be expanded to non-conv2d-style operators in the future."""

    # Dynamically create op lists for convolutions with a list of kernel sizes
    KERNELS = [
        [1, 1],
        [3, 3],
        [5, 5],
    ]

    # dim = [D, H, W]
    KERNELS_3D = [
        [1, 1, 1],
        [2, 3, 3],
        [3, 5, 5],
    ]

    TEMPLATE_LIST = [
        "conv2d",
        "conv2d_bias",
        "conv2d_relu",
        "conv2d_relu6",
        "conv2d_relu_n1_to_1",
        "conv2d_tanh",
        "depthwise_conv2d",
        "depthwise_conv2d_bias",
        "transpose_conv2d",
    ]

    TEMPLATE_LIST_CONV3D = [
        "conv3d",
        "conv3d_bias",
    ]

    for t in TEMPLATE_LIST:
        for k in KERNELS:
            testName = "{}_{}x{}".format(t, k[0], k[1])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False
    # The existing operators don't support kernel dimensions higher than 2.
    for t in TEMPLATE_LIST_CONV3D:
        for k in KERNELS_3D:
            testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # Delete any templates after having created any dynamic ops
    # This is a two-pass operation because it's bad practice to delete
    # keys from dictionaries while iterating
    keyList = []
    for k in TF_OP_LIST:
        try:
            if TF_OP_LIST[k]["template"]:
                keyList.append(k)
                continue
        except KeyError:
            pass

    for k in keyList:
        del TF_OP_LIST[k]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--seed", dest="random_seed", default=42, type=int, help="Random seed"
    )
    parser.add_argument(
        "--random-shapes",
        dest="random_shapes",
        default=0,
        type=int,
        help=(
1709 "Use N random shapes of each rank for generating tests,"
1710 "seeded with random seed"
        ),
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        dest="output_dir",
        default=".",
        type=str,
        help="Test output directory path prefix",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        default=False,
        action="store_true",
        help="Do not print test names",
    )
    parser.add_argument(
        "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
    )
    parser.add_argument(
        "-m",
        "--tflite-kernel-mode",
        dest="tflite_kernel_mode",
        type=str,
        choices=["reference", "optimized"],
        default="reference",
        help="TFLite interpreter kernel mode",
    )
    parser.add_argument(
        "--num-samples",
        dest="num_samples",
        default=200,
        type=int,
        help="Number of input samples for post-training quantization",
    )
    parser.add_argument(
        "--filter",
        dest="filter",
        default="",
        type=str,
        help="Filter test names by this expression",
    )
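
    # e.g. "--filter conv2d" keeps only tests whose names contain "conv2d";
    # the expression is compiled with re.compile() and matched via re.search()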
    args = parser.parse_args()

    # Turn the filter into a re object if present
    filter = None
    if args.filter != "":
        filter = re.compile(args.filter)

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    # Disable TF info messages
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    try:
        os.makedirs(args.output_dir)
    except FileExistsError:
        pass

    if args.random_shapes:
        gen_rand_shapes(args)

    # Build dynamic ops
    createDynamicOpLists()

    # Generate the test list and arguments to run_unit_test()
    unit_test_args = []

    for op in TF_OP_LIST:
        generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)

    errors = 0
    for t in unit_test_args:
        if not run_unit_test(*t):
            errors = errors + 1

    if not args.quiet:
        print("\nAll tasks done - with {} errors".format(errors))

    return 1 if errors else 0


if __name__ == "__main__":
    exit(main())