#!/usr/bin/env python3
# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# ------|------------------|------------------------------------
#  0    | DEBUG            | [Default] Print all messages
#  1    | INFO             | Filter out INFO messages
#  2    | WARNING          | Filter out INFO & WARNING messages
#  3    | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402

from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types': list of TensorFlow types that should be tested for this op
#            OR
#            a dictionary of {'framework_name': [type_list]} for cases where only
#            a subset of the types should be tested in each framework.  This can
#            also be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template':      boolean (indicates that this is a templated op which gets
#                    further processing in createDynamicOpLists)
#   'bias':          boolean indicating that there is a bias component to be generated
#   'qtypes':        list of QuantType quantized types to generate for this op
#   'rank':          tuple (lowest rank, highest rank); dimension range of the input tensor
#   'custom_shapes': list of custom shapes for specific operators

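# As an illustrative sketch only (TBuilder.MyOp is a made-up name, not a real
# builder), a minimal entry has this shape:
#
#   "my_op": {
#       "operands": (1, 0),  # one placeholder input, no constant inputs
#       "build_fcn": (TBuilder.MyOp, TGen.tgBasic, ArgGen.agNone),
#       "types": TYPE_F,
#   }
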
TF_OP_LIST = {
    "add": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sub": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
        },
    },
    "mul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "exp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rcp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "relu1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu0To1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu0To1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu6": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "leaky_relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "prelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "gelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            # Need compiler support for tf.Erf.
            # "tf": TYPE_F,
            "tflite": list(
                # Only float32, int8, and uint8 are supported currently
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
            ),
        },
    },
    "concat": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
        "rank": (0, 4),
        "custom_shapes": {
            "custom_shape_only": False,
            "shape_list": [()],
        },
    },
183 "bitwise_and": {
184 "operands": (2, 0),
185 "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
186 "types": {"tf": TYPE_I}, # Not supported in TF Lite
187 },
188 "bitwise_or": {
189 "operands": (2, 0),
190 "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
191 "types": {"tf": TYPE_I}, # Not supported in TF Lite
192 },
193 "bitwise_not": {
194 "operands": (1, 0),
195 "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
196 "types": {"tf": TYPE_I}, # Not supported in TF Lite
197 },
198 "bitwise_xor": {
199 "operands": (2, 0),
200 "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
201 "types": {"tf": TYPE_I}, # Not supported in TF Lite
202 },
203 "logical_and": {
204 "operands": (2, 0),
205 "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
206 "types": TYPE_B,
207 },
208 "logical_or": {
209 "operands": (2, 0),
210 "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
211 "types": TYPE_B,
212 },
213 "logical_not": {
214 "operands": (1, 0),
215 "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
216 "types": TYPE_B,
217 },
218 "reduce_any": {
219 "operands": (1, 0),
220 "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
221 "types": TYPE_B,
222 },
223 "reduce_all": {
224 "operands": (1, 0),
225 "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
TatWai Chong6ad6d6e2022-11-11 15:28:50 -0800226 "types": TYPE_B,
Jeremy Johnson015c3552022-02-23 12:15:03 +0000227 },
228 "reduce_min": {
229 "operands": (1, 0),
230 "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
231 "types": {
232 "tf": TYPE_FI,
233 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
234 },
235 },
236 "reduce_max": {
237 "operands": (1, 0),
238 "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
239 "types": {
240 "tf": TYPE_FI,
241 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
242 },
243 },
244 "reduce_sum": {
245 "operands": (1, 0),
246 "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
247 "types": {
248 "tf": TYPE_F,
249 # v2 converter doesn't recognize quantized reduce_sum
250 # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
251 "tflite": TYPE_F,
252 },
253 },
254 "reduce_mean": {
255 "operands": (1, 0),
256 "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
257 "types": {
258 "tf": TYPE_F,
259 "tflite": list(
260 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
261 ),
262 },
263 },
264 "reduce_product": {
265 "operands": (1, 0),
266 "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
267 "types": TYPE_F,
268 },
269 "min": {
270 "operands": (2, 0),
271 "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
272 "types": TYPE_FI,
273 },
274 "max": {
275 "operands": (2, 0),
276 "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
277 "types": TYPE_FI,
278 },
279 "pow": {
280 "operands": (2, 0),
281 "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
282 # Technically, integer is supported, but only for positive exponents.
283 # Needs a random argument generator.
284 "types": TYPE_F,
285 },
286 "abs": {
287 "operands": (1, 0),
288 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
289 "types": TYPE_F,
290 },
291 "ceil": {
292 "operands": (1, 0),
293 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
294 "types": TYPE_F,
295 },
296 "floor": {
297 "operands": (1, 0),
298 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
299 "types": TYPE_F,
300 },
301 "log": {
302 "operands": (1, 0),
303 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
304 "types": TYPE_F,
305 },
306 "negate": {
307 "operands": (1, 0),
308 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
309 "types": TYPE_F,
310 },
311 "rsqrt": {
312 "operands": (1, 0),
Jerry Geb1f25012023-03-03 11:33:51 -0800313 "build_fcn": (TBuilder.Rsqrt, TGen.tgBasicPositive, ArgGen.agNone),
314 "types": {
315 "tf": TYPE_F,
316 "tflite": list(TYPE_F + [QuantType.ALL_I8]),
317 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000318 },
TatWai Chongd713a4d2022-11-10 13:54:28 -0800319 "sign": {
320 "operands": (1, 0),
321 "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
322 "types": {
323 "tf": TYPE_F,
324 },
325 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000326 "sigmoid": {
327 "operands": (1, 0),
328 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
329 "types": {
330 "tf": TYPE_F,
331 "tflite": list(
332 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
333 ),
334 },
335 },
336 "tanh": {
337 "operands": (1, 0),
338 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
339 "types": {
340 "tf": TYPE_F,
341 "tflite": list(
342 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
343 ),
344 },
345 },
Won Jeon78155c62023-06-10 00:20:04 +0000346 "erf": {
347 "operands": (1, 0),
348 "build_fcn": (TBuilder.Erf, TGen.tgBasic, ArgGen.agNone),
349 "types": {
350 "tf": TYPE_F,
351 },
352 },
Luke Hutton41601862022-12-06 17:29:15 +0000353 "sin": {
354 "operands": (1, 0),
355 "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
Luke Hutton657af862022-12-16 16:30:45 +0000356 "types": TYPE_F,
Luke Hutton41601862022-12-06 17:29:15 +0000357 },
358 "cos": {
359 "operands": (1, 0),
360 "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
Luke Hutton657af862022-12-16 16:30:45 +0000361 "types": TYPE_F,
Luke Hutton41601862022-12-06 17:29:15 +0000362 },
Luke Hutton2138a192022-12-15 11:01:39 +0000363 "atan2": {
364 "operands": (2, 0),
365 "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
366 "types": {
367 "tflite": TYPE_F,
368 },
369 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000370 "square": {
371 "operands": (1, 0),
372 "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
373 "types": TYPE_F,
374 },
375 "squared_difference": {
376 "operands": (2, 0),
377 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
Won Jeondd14c1b2023-06-29 23:20:00 +0000378 "types": {
379 "tf": TYPE_F,
380 "tflite": list(TYPE_FI + [QuantType.ALL_I8]),
381 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000382 },
383 "equal": {
384 "operands": (2, 0),
385 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
386 "types": TYPE_FI,
387 },
388 "greater_equal": {
389 "operands": (2, 0),
390 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
391 "types": TYPE_FI,
392 },
393 "greater": {
394 "operands": (2, 0),
395 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
396 "types": TYPE_FI,
397 },
398 "less": {
399 "operands": (2, 0),
400 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
401 "types": TYPE_FI,
402 },
403 "less_equal": {
404 "operands": (2, 0),
405 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
406 "types": TYPE_FI,
407 },
408 "conv2d_TEMPLATE": {
409 "operands": (1, 1),
410 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
411 "types": {
412 "tf": [tf.float32],
413 "tflite": [
414 tf.float32,
415 QuantType.CONV_U8_U8,
416 QuantType.CONV_I8_I8,
417 QuantType.CONV_I16_I8,
418 ],
419 },
420 "template": True,
421 },
422 "conv2d_relu_TEMPLATE": {
423 "operands": (1, 2),
424 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
425 "types": {
426 "tf": [tf.float32],
427 "tflite": [
428 tf.float32,
429 QuantType.CONV_U8_U8,
430 QuantType.CONV_I8_I8,
431 QuantType.CONV_I16_I8,
432 ],
433 },
434 "template": True,
435 },
436 "conv2d_relu6_TEMPLATE": {
437 "operands": (1, 2),
438 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
439 "types": {
440 "tf": [tf.float32],
441 "tflite": [
442 tf.float32,
443 QuantType.CONV_U8_U8,
444 QuantType.CONV_I8_I8,
445 QuantType.CONV_I16_I8,
446 ],
447 },
448 "template": True,
449 },
450 "conv2d_relu_n1_to_1_TEMPLATE": {
451 "operands": (1, 2),
452 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
453 "types": {
454 "tf": [tf.float32],
455 "tflite": [
456 tf.float32,
457 QuantType.CONV_U8_U8,
458 QuantType.CONV_I8_I8,
459 QuantType.CONV_I16_I8,
460 ],
461 },
462 "template": True,
463 },
464 # This test is converted as:
465 # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
466 # TODO: anyway to generate tfl.conv2d(){fused_activation_function="TANH"}?
467 "conv2d_tanh_TEMPLATE": {
468 "operands": (1, 2),
469 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
470 "types": {
471 "tf": [tf.float32],
472 "tflite": [
473 tf.float32,
474 QuantType.CONV_U8_U8,
475 QuantType.CONV_I8_I8,
476 QuantType.CONV_I16_I8,
477 ],
478 },
479 "template": True,
480 },
481 "conv2d_bias_TEMPLATE": {
482 "operands": (1, 2),
483 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
484 "types": {
485 "tf": [tf.float32],
486 "tflite": [
487 tf.float32,
488 QuantType.CONV_U8_U8,
489 QuantType.CONV_I8_I8,
490 QuantType.CONV_I16_I8,
491 ],
492 },
493 "bias": True,
494 "template": True,
495 },
TatWai Chongfd629052022-07-25 04:01:58 +0000496 "conv3d_TEMPLATE": {
497 "operands": (1, 1),
498 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
499 "types": {
500 "tf": [tf.float32],
501 "tflite": [
502 tf.float32,
503 QuantType.CONV_U8_U8,
504 QuantType.CONV_I8_I8,
505 # Quantization to 16x8-bit not yet supported by tflite.
506 ],
507 },
508 "template": True,
509 "rank": (1, 5),
510 },
511 "conv3d_bias_TEMPLATE": {
512 "operands": (1, 2),
513 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
514 "types": {
515 "tf": [tf.float32],
516 "tflite": [
517 tf.float32,
518 QuantType.CONV_U8_U8,
519 QuantType.CONV_I8_I8,
520 # Quantization to 16x8-bit not yet supported by tflite.
521 ],
522 },
523 "bias": True,
524 "template": True,
525 "rank": (1, 5),
526 },
    "depthwise_conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (
            TBuilder.DepthwiseConv2d,
            TGen.tgDepthwiseConv2d,
            ArgGen.agDepthwiseConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "depthwise_conv2d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (
            TBuilder.DepthwiseConv2dWithBias,
            TGen.tgDepthwiseConv2d,
            ArgGen.agDepthwiseConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "bias": True,
        "template": True,
    },
    "transpose_conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (
            TBuilder.TransposeConv2d,
            TGen.tgTransposeConv2d,
            ArgGen.agTransposeConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "argmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
        "types": {"tf": TYPE_F},
    },
    "avg_pool2d": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "max_pool2d": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # ALL_I16 is not supported yet.
            # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
            # QI16 is missing from MaxPoolOperandAndResultConstraints.
            # If QI16 is added back, this test can run through.
        },
    },
    "reshape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
        "types": TYPE_FI,
    },
    "transpose": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
        "types": TYPE_FI,
    },
    "slice": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
        "types": TYPE_FI,
    },
    "strided_slice": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
        "types": TYPE_FI,
    },
    "select": {
        "operands": (3, 0),
        "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "addn": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "concatv2": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
        "rank": (0, 4),
        "custom_shapes": {
            "custom_shape_only": False,
            "shape_list": [()],
        },
    },
649 "stack": {
650 "operands": (4, 0),
651 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
652 "types": TYPE_FI,
653 },
654 "unstack": {
655 "operands": (1, 0),
656 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
657 "types": TYPE_F,
658 },
TatWai Chongf7008da2022-09-09 09:35:40 +0000659 "mirrorpad": {
660 "operands": (1, 0),
661 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
662 "types": TYPE_FI,
663 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000664 "pad": {
665 "operands": (1, 0),
666 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
TatWai Chong2226f902023-02-22 18:38:01 -0800667 "types": {
668 "tf": TYPE_F,
669 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
670 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000671 },
672 "expand_dims": {
673 "operands": (1, 0),
674 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
675 "types": TYPE_FI,
676 },
677 "shape": {
678 "operands": (1, 0),
679 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
680 "types": TYPE_FI,
681 },
682 "rank": {
683 "operands": (1, 0),
684 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
685 "types": TYPE_FI,
686 },
687 "fill": {
688 "operands": (1, 0),
689 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
690 "types": TYPE_FI,
691 },
692 "elu": {
693 "operands": (1, 0),
694 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
695 "types": TYPE_F,
696 },
697 "softmax": {
698 "operands": (1, 0),
699 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
700 "types": {
701 "tf": TYPE_F,
702 "tflite": list(
703 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
704 ),
705 },
706 },
707 "log_softmax": {
708 "operands": (1, 0),
709 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
710 "types": TYPE_F,
711 },
712 "matmul": {
713 "operands": (2, 0),
714 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
715 "types": {
716 "tf": TYPE_F,
717 "tflite": list(
718 TYPE_F
719 + [QuantType.ALL_U8, QuantType.ALL_I8]
720 # 16 bits matmul fail to convert
721 ),
722 },
723 },
724 "add_scalar": {
725 "operands": (1, 0),
726 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
727 "types": TYPE_F,
728 },
729 "add_1d": {
730 "operands": (2, 0),
731 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
732 "types": TYPE_F,
733 },
734 "split": {
735 "operands": (1, 0),
736 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
737 "types": TYPE_FI,
738 },
739 "tile": {
740 "operands": (1, 0),
741 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
742 "types": TYPE_FI,
743 },
744 "reverse": {
745 "operands": (1, 0),
746 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
747 "types": {"tf": TYPE_FI},
748 },
749 "gather": {
750 "operands": (1, 0),
751 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
752 "types": TYPE_FI,
753 },
754 "gather_nd": {
755 "operands": (1, 0),
756 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
757 "types": TYPE_FI,
758 },
759 "scatter_nd": {
760 "operands": (1, 0),
761 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
762 "types": TYPE_FI,
763 },
764 "space_to_batch": {
765 "operands": (1, 0),
766 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
767 "types": TYPE_F,
768 },
769 "batch_to_space": {
770 "operands": (1, 0),
771 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
772 "types": TYPE_F,
773 },
774 "space_to_depth": {
775 "operands": (1, 0),
776 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
777 "types": TYPE_F,
778 },
779 "depth_to_space": {
780 "operands": (1, 0),
781 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
782 "types": TYPE_F,
783 },
784 "one_hot": {
785 "operands": (3, 1),
786 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
787 "types": TYPE_FI,
788 },
789 "fakequant": {
790 "operands": (1, 0),
791 "build_fcn": (
792 TBuilder.Fakequant,
793 TGen.tgBasic,
794 ArgGen.agFakequant,
795 ),
796 "types": {"tf": TYPE_F},
797 },
TatWai Chong0cef07e2023-02-27 13:22:52 -0800798 "resize": {
Jeremy Johnson015c3552022-02-23 12:15:03 +0000799 "operands": (1, 0),
TatWai Chong0cef07e2023-02-27 13:22:52 -0800800 "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
TatWai Chongf7326092022-06-08 12:17:14 -0700801 "types": {
802 "tf": TYPE_F,
803 "tflite": list(
804 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
805 ),
806 },
Jerry Ge5dd5a552023-05-23 22:41:20 +0000807 "custom_shapes": {
808 "custom_shape_only": False,
809 "shape_list": [(3, 1, 1, 7)],
810 },
TatWai Chongf7326092022-06-08 12:17:14 -0700811 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000812 "left_shift": {
813 "operands": (1, 0),
814 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
815 "types": {"tf": [tf.int32]},
816 },
817 "right_shift": {
818 "operands": (1, 0),
819 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
820 "types": {
821 "tf": [
822 tf.int32,
823 ]
824 },
825 },
Jerry Ge9e94af82022-10-27 09:57:00 -0700826 "while": {
827 "operands": (1, 0),
828 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
829 "types": {
830 "tflite": list(TYPE_F),
831 },
832 },
833 "lstm": {
834 "operands": (1, 0),
835 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
836 "types": {
837 "tflite": [
838 tf.float32,
839 # tf.int32
840 ]
841 },
842 },
Tai Lycf84bc92023-09-07 20:49:09 +0000843 "lstm_stateful": {
844 "operands": (1, 0),
845 "build_fcn": (TBuilder.SLSTM, TGen.tgRecurrent, ArgGen.agNone),
846 "types": {
847 "tflite": [
848 tf.float32,
849 ]
850 },
851 },
Jerry Ge9e94af82022-10-27 09:57:00 -0700852 "gru": {
853 "operands": (1, 0),
854 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
855 "types": {
856 "tflite": [
857 tf.float32,
858 # tf.int32
859 ]
860 },
861 },
862 "rnn": {
863 "operands": (1, 0),
864 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
865 "types": {
866 "tflite": [
867 tf.float32,
868 ]
869 },
870 },
Tai Lycf84bc92023-09-07 20:49:09 +0000871 "callonce": {
872 "operands": (1, 0),
873 "build_fcn": (TBuilder.CallOnce, TGen.tgBasic, ArgGen.agNone),
874 "types": {
875 "tflite": [tf.float32],
876 },
877 "custom_shapes": {
878 "custom_shape_only": True,
879 "shape_list": [(1,)],
880 },
881 },
Luke Hutton261b7b62023-01-10 14:50:31 +0000882 "rfft2d": {
883 "operands": (1, 0),
884 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
885 "types": {
886 "tflite": TYPE_F,
887 },
888 },
Luke Hutton714aa602023-02-08 19:45:26 +0000889 "real": {
890 "operands": (1, 0),
891 "build_fcn": (TBuilder.Real, TGen.tgComplexComponents, ArgGen.agNone),
892 "types": {
893 "tflite": [tf.complex64],
894 },
895 },
896 "imag": {
897 "operands": (1, 0),
898 "build_fcn": (TBuilder.Imag, TGen.tgComplexComponents, ArgGen.agNone),
899 "types": {
900 "tflite": [tf.complex64],
901 },
902 },
Tai Lyfe36fa92023-06-01 21:45:12 +0000903 "broadcastto": {
904 "operands": (1, 1),
905 "build_fcn": (TBuilder.BroadcastTo, TGen.tgBroadcastTo, ArgGen.agNone),
906 "types": {
907 "tf": TYPE_FIB,
908 },
909 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000910}

# Shapes to be tested; default can be overwritten
shape_list = [
    (1,),
    (64,),
    (14, 19),
    (13, 21, 3),
    (1, 8, 16),
    (1, 4, 4, 4),
    (1, 8, 4, 17),
    (1, 4, 8, 19),
    (1, 32, 32, 8),
    (1, 7, 7, 9),
    (3, 1, 1, 7),
    (2, 2, 7, 7, 2),
    (1, 4, 8, 21, 17),
    (3, 32, 16, 16, 5),
]
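# (The default shapes above cover ranks 1 through 5; each operator narrows this
# to its supported range via the optional "rank" entry, handled in
# build_const_net below.)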


def gen_rand_shapes(args):
    """Overwrite the global shape list with a new list of random shapes"""
    global shape_list

    rng = np.random.default_rng(args.random_seed)

    # Don't let things get too big... cap the maximum volume, but let
    # an individual dimension be 1..47
    max_total_volume = 32 * 32 * 4
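
    # For example: a candidate shape (40, 40, 40) has volume 64000, well over
    # the 32 * 32 * 4 = 4096 cap, so the loop below repeatedly halves one
    # dimension (and the running volume) until the shape fits under the cap.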

    shape_list = []
    # Only iterate over ranks 2, 3, 4, and 5
    for rank in range(2, 6):
        for n in range(args.random_shapes):
            new_shape = rng.integers(1, 48, size=rank)

            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1

            # Limit the total shape volume and throw out any
            # shapes that wouldn't leave at least size=2 in some non-batch dimension
            volume = 1
            skip_shape = False
            for i in range(rank):

                volume *= new_shape[i]

                # Reduce the shape, while it's larger than the maximum volume
                while volume > max_total_volume:
                    new_shape[i] = new_shape[i] // 2
                    volume = volume // 2

                    # Now an untenable dimension size? Skip this one.
                    if new_shape[i] < 1:
                        skip_shape = True

            if not skip_shape:
                shape_list.append(tuple(new_shape))


# Construct, run, and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to .tflite if it's a quantized unit test
def run_unit_test(
    op_name,
    args,
    test_dir,
    curr_shape,
    addl_args,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
):

    try:
        op = TF_OP_LIST[op_name]
        op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

        # Get and seed a random number generator for this test
        rng = np.random.default_rng(seed)

        # return placeholders=(str: name, np.array: value)
        #        consts=(str: name, np.array: value)
        placeholders, consts = (
            tensor_gen_fcn(op, curr_shape, dtype, rng, False)
            if tensor_gen_fcn.__name__ == "tgBFuzz"
            else tensor_gen_fcn(op, curr_shape, dtype, rng)
        )

        # If the test doesn't have any placeholders/consts, terminate
        if len(placeholders) == 0 and len(consts) == 0:
            return True

        if not args.quiet:
            print(" {} ".format(test_dir))

        try:
            os.mkdir(test_dir)
        except FileExistsError:
            pass
        const_nodes = [value for name, value in consts]

        num_placeholders = len(placeholders)
        # If the test is quantized, create tensor quantization metadata
        # for each input tensor, based on the different quantized types
        if quantized_inference_dtype:
            is_quantized = True
            # TODO: support INT8 IFM x INT4 weight later
            if quantized_inference_dtype == QuantType.ALL_U8:
                qzero = [128] * num_placeholders
                numpy_dtype = [np.uint8] * num_placeholders
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.ALL_I8:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int8] * num_placeholders
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.ALL_I16:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int16] * num_placeholders
                tflite_inference_dtype = tf.int16
            elif quantized_inference_dtype == QuantType.CONV_U8_U8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [128] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.uint8, np.uint8]
                else:
                    numpy_dtype = [np.uint8, np.uint8, np.int32]
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.CONV_I8_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [0] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.int8, np.int8]
                else:
                    numpy_dtype = [np.int8, np.int8, np.int32]
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.CONV_I16_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                if num_placeholders == 2:
                    qzero = [0, 0]
                    numpy_dtype = [np.int16, np.int8]
                else:
                    qzero = [0, 0, 0]
                    numpy_dtype = [
                        np.int16,
                        np.int8,
                        np.int64,
                    ]  # np.int64 to represent the 40-bit accumulator
                tflite_inference_dtype = tf.int16
            else:
                raise Exception(
                    "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
                )

        else:
            is_quantized = False
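
        # (QuantType semantics, per the branches above: ALL_* quantizes every
        # tensor to a single dtype, while CONV_X_Y pairs an X-typed feature map
        # with Y-typed weights plus a wider bias/accumulator dtype.)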

        tf_model_filename = None
        tf_result_npy_filename = None
        tf_result_name = None

        tflite_model_filename = None
        tflite_result_npy_filename = None
        tflite_result_name = None

        placeholder_names = []
        placeholder_vals = []
        placeholder_signatures = ()
        placeholder_npy_filenames = []
        placeholder_shapes = []

        for idx, (name, val) in enumerate(placeholders):
            placeholder_names.append(name)
            placeholder_signatures = placeholder_signatures + (
                tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
            )
            placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
            placeholder_shapes.append(val.shape)

        # Get the test builder class
        fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
        concrete_function = tf.function(input_signature=placeholder_signatures)(
            fcn_node.eval
        ).get_concrete_function()

        if is_quantized:

            assert dtype is tf.float32, "quantized test must come from float32 graph"

            # 1. Quantize the float placeholder npy to feed the graph
            for idx, (name, val) in enumerate(placeholders):

                # We use np.amin()/np.amax() to determine the dynamic range
                # for the quantized test
                zeropoint = 0
                scale = 1.0
                if numpy_dtype[idx] != np.int64:
                    qmin = np.iinfo(numpy_dtype[idx]).min
                    qmax = np.iinfo(numpy_dtype[idx]).max
                    num_bits = np.iinfo(numpy_dtype[idx]).bits
                # 40-bit is represented as np.int64
                else:
                    num_bits = 40
                    qmin = -(1 << num_bits)
                    qmax = (1 << num_bits) - 1

                min_val = np.amin(val)
                max_val = np.amax(val)

                # For a single-value tensor, we set scale equal to abs(value)
                # and fix zeropoint to 128:
                # if val > 0, it'll be represented as 129,
                #   where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                #   where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range
                #   [-128.0, 128.0], and let quantized 1 represent the value;
                # also adjust the effective min/max accordingly
                if max_val == min_val:
                    if max_val != 0:
                        scale = abs(max_val)
                    else:
                        scale = 1.0
                    min_val = float(qmin - qzero[idx]) * scale
                    max_val = float(qmax - qzero[idx]) * scale
                else:
                    scale = (max_val - min_val) / float(qmax - qmin)
                    if op_name == "squared_difference":
                        zeropoint = -int(round((-min_val) / scale)) + qmin
                    else:
                        zeropoint = int(round((-min_val) / scale)) + qmin
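
                # (Standard affine convention: q = round(real / scale) + zeropoint,
                # so the real value is recovered as approximately
                # scale * (q - zeropoint).)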

                # Run through tf.fakequant first to make sure the quantization
                # error is aligned
                fakequant_val = tf.quantization.fake_quant_with_min_max_args(
                    val,
                    min=min_val,
                    max=max_val,
                    num_bits=num_bits,
                    name="gen_quant_npy",
                )

                quant_val = np.round(fakequant_val / scale) + zeropoint

                # In a few unit tests after the TF hash of May 2020, this
                # quantized value for some reason exceeds the [0, 255] range
                saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])

                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]),
                    saved_val,
                    False,
                )

                placeholder_vals.append(tf.convert_to_tensor(saved_val))

            # 2. Convert the model to a quantized TFLite flatbuffer
            module = tf.Module()
            converter = tf.lite.TFLiteConverter.from_concrete_functions(
                [concrete_function], module
            )
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.experimental_new_converter = True

            # Use the MLIR-based post-training quantizer
            converter.experimental_new_quantizer = True

            flag = (
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8  # noqa: E501
            )
            if tflite_inference_dtype == tf.int16:
                converter.target_spec.supported_ops = [flag]

            # Generator function for the TFLiteConverter integer quantization,
            # which generates a few hundred input samples with the same order,
            # type, and shape as the inputs, to calibrate/estimate the range of
            # the floating-point inputs.
            # For broadcast fuzzing tests, fuzzing needs to be disabled;
            # otherwise it causes a mismatch of the input tensor shapes.
            def input_stats():
                for i in range(0, args.num_samples):
                    placeholders, _ = (
                        tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng, True)
                        if tensor_gen_fcn.__name__ == "tgBFuzz"
                        else tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng)
                    )
                    yield [s[1] for s in placeholders]

            converter.representative_dataset = input_stats
            converter.inference_input_type = tflite_inference_dtype
            converter.inference_output_type = tflite_inference_dtype

            tflite_model = converter.convert()

            tflite_model_filename = "model.tflite"

            # Write out the converted model to disk
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        else:  # is_quantized is False

            # 1. Save out the numpy arrays directly
            for idx, (name, val) in enumerate(placeholders):
                placeholder_vals.append(tf.convert_to_tensor(val))

                # Complex tensors are expected to be represented by a
                # single floating-point tensor of shape [?, ..., ?, 2].
                if val.dtype == np.complex64:
                    val_shape = val.shape + (2,)
                    val = val.view(np.float32)
                    val = val.reshape(val_shape)

                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
                )

            # 2.a Save out .pb if the framework list includes tensorflow
            if "tf" not in excluded_framework_list:
                # Write out the graph as protobuf to disk
                tf_model_filename = "model.pb"
                tf.io.write_graph(
                    concrete_function.graph, test_dir, tf_model_filename, True
                )

            # 2.b Save out .tflite if the framework list includes tflite
            if "tflite" not in excluded_framework_list:
                # Convert the model to a TFLite flatbuffer
                module = tf.Module()

                if op_name == "callonce" or op_name == "lstm_stateful":
                    converter = tf.lite.TFLiteConverter.from_concrete_functions(
                        [concrete_function], fcn_node
                    )
                else:
                    converter = tf.lite.TFLiteConverter.from_concrete_functions(
                        [concrete_function], module
                    )

                converter.experimental_new_converter = True

                # Even if it's a non-quantized int32 test, this needs to be set
                # to tf.float32
                converter.inference_input_type = tf.float32
                converter.inference_output_type = tf.float32
                tflite_model = converter.convert()

                # Write out the converted model to disk
                tflite_model_filename = "model.tflite"
                with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                    f.write(tflite_model)

        # Get the TF reference result if .pb is specified
        if tf_model_filename:
            tf_result_npy_filename = "tf_result.npy"
            tf_result = concrete_function(*placeholder_vals)
            np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)

            tf_result_name = result_name

        # Get the TFLite inference result if .tflite is specified
        if tflite_model_filename:
            tflite_result_npy_filename = "tflite_result.npy"

            ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]

            if args.tflite_kernel_mode == "optimized" or (
                op_name in ops_with_optimized_only_kernel
            ):
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename)
                )
            elif args.tflite_kernel_mode == "reference":
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename),
                    experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
                )
            else:
                assert 0, "unknown tflite interpreter mode {}".format(
                    args.tflite_kernel_mode
                )
            interpreter.allocate_tensors()

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            assert len(input_details) == len(
                placeholder_vals
            ), "number of placeholders mismatch"

            for idx, val in enumerate(placeholder_vals):
                interpreter.set_tensor(input_details[idx]["index"], val.numpy())

            interpreter.invoke()
            tflite_result = interpreter.get_tensor(output_details[0]["index"])

            np.save(
                os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
            )

            # The result tensor name changes after converting to a TFLite
            # flatbuffer, so overwrite the information from the TFLite model
            # directly.  Assume a single result tensor for now.
            tflite_result_name = output_details[0]["name"]

        _, test_name = os.path.split(test_dir)

        # Write out the test descriptor
        write_test_json(
            filename=os.path.join(test_dir, "test.json"),
            tf_model_filename=tf_model_filename,
            tf_result_npy_filename=tf_result_npy_filename,
            tf_result_name=tf_result_name,
            tflite_model_filename=tflite_model_filename,
            tflite_result_npy_filename=tflite_result_npy_filename,
            tflite_result_name=tflite_result_name,
            ifm_name=placeholder_names,
            ifm_file=placeholder_npy_filenames,
            ifm_shape=placeholder_shapes,
            framework_exclusions=excluded_framework_list,
            quantized=is_quantized,
            test_name=test_name,
        )
    except Exception as e:
        msg = "Error running task: {}".format(e)
        print(msg)
        # Pass the exception info positionally; the 'etype' keyword was removed
        # from traceback.format_exception in Python 3.10
        print(
            "".join(traceback.format_exception(type(e), e, e.__traceback__))
        )
        return False
    return True


def build_const_net(
    args,
    curr_shape,
    op_name,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
    rng,
    filter,
    unit_test_args,
):

    if quantized_inference_dtype:
        quant_dtype = get_tf_dtype(quantized_inference_dtype)
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
    else:
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
    test_dir = os.path.join(args.output_dir, test_dir)

    # If the operator has an additional function to generate arguments, call it
    # here and iterate through the argument list that it generates
    op = TF_OP_LIST[op_name]
    op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Default the testing rank range to (1, 4)
        rank_lo = 1
        rank_hi = 4

    if len(curr_shape) not in range(rank_lo, rank_hi + 1):
        return

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
    for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
                [
                    op_name,
                    args,
                    test_dir + desc,
                    curr_shape,
                    addl_args,
                    dtype,
                    excluded_framework_list,
                    quantized_inference_dtype,
                    result_name,
                    seed,
                ]
            )


1414
1415# python hash is not reproducible, create hash for our purpose
1416def op_name_hash(op_name):
1417 result = 0xDEADBEEF
1418 for ch in op_name:
1419 if result & 1:
1420 result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
1421 else:
1422 result = (ord(ch) << 24) ^ (result >> 1)
1423
1424 return result
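
# Example: op_name_hash("add") yields the same value in every run, whereas
# Python's built-in hash("add") is salted per process (see PYTHONHASHSEED),
# which would change the generated random tests from run to run.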


def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):

    if not args.quiet:
        print("Generating tests for {}".format(op_name))

    op = TF_OP_LIST[op_name]

    # Seed the RNG so that we get the same random tests for each test each time.
    # If the number of tests for a given generation function changes, the tests
    # for that operator may also change accordingly, but this will at least keep
    # down churn across operators.

    bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
        np.int32
    ).max
    rng = np.random.default_rng(bounded_hash_val)

    # This is a dictionary with 'tf' and 'tflite' as keys,
    # the values being the data types we want to test under these frameworks

    if isinstance(op["types"], dict):
        try:
            tf_dtypes = op["types"]["tf"]
        except KeyError:
            tf_dtypes = []
        try:
            tflite_dtypes = op["types"]["tflite"]
        except KeyError:
            tflite_dtypes = []
    elif isinstance(op["types"], list):
        tf_dtypes = op["types"]
        tflite_dtypes = op["types"]

    tf_nonquantized_dtypes = tf_dtypes  # tf doesn't support quantized data types
    tflite_quantized_dtypes = []
    tflite_nonquantized_dtypes = []
    for dtype in tflite_dtypes:
        if isinstance(dtype, QuantType):
            tflite_quantized_dtypes.append(dtype)
        else:
            tflite_nonquantized_dtypes.append(dtype)

    nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
        set(tflite_nonquantized_dtypes)
    )
    nonquantized_dtypes = list(nonquantized_dtypes_set)
    quantized_dtypes = tflite_quantized_dtypes

    # Append custom_shapes, or replace shape_list with custom_shapes
    try:
        custom_shapes = op["custom_shapes"]
        if custom_shapes["custom_shape_only"]:
            shape_list = custom_shapes["shape_list"]
        else:
            shape_list = shape_list.copy()
            shape_list.extend(custom_shapes["shape_list"])
    except KeyError:
        pass

    # Populate the non-quantized unit test arguments
    for dtype in nonquantized_dtypes:

        excluded_framework_set = set(ALL_FRAMEWORKS)
        if dtype in tf_nonquantized_dtypes:
            excluded_framework_set.remove("tf")
        if dtype in tflite_nonquantized_dtypes:
            excluded_framework_set.remove("tflite")
        excluded_framework_list = list(excluded_framework_set)

        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                dtype,
                excluded_framework_list,
                None,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    # Populate the quantized unit test arguments;
    # must exclude 'tf', and the source dtype must be tf.float32
    for dtype in quantized_dtypes:
        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                tf.float32,
                ["tf"],
                dtype,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    return unit_test_args


def createDynamicOpLists():
    """The templated operators are conv2d-style operators with a number of kernel
    sizes.  Since the operator is unchanged, we generate the range of kernel
    sizes here in this loop and remove the original templates from the list.

    This could be expanded to non-conv2d-style operators in the future."""

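    # For example, "conv2d_TEMPLATE" expands below into "conv2d_1x1",
    # "conv2d_3x3", and "conv2d_5x5"; the *_TEMPLATE entries themselves are
    # removed at the end of this function.
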
    # Dynamically create op lists for convolutions with a list of kernel sizes
    KERNELS = [
        [1, 1],
        [3, 3],
        [5, 5],
    ]

    # dim = [D, H, W]
    KERNELS_3D = [
        [1, 1, 1],
        [2, 3, 3],
        [3, 5, 5],
    ]

    TEMPLATE_LIST = [
        "conv2d",
        "conv2d_bias",
        "conv2d_relu",
        "conv2d_relu6",
        "conv2d_relu_n1_to_1",
        "conv2d_tanh",
        "depthwise_conv2d",
        "depthwise_conv2d_bias",
        "transpose_conv2d",
    ]

    TEMPLATE_LIST_CONV3D = [
        "conv3d",
        "conv3d_bias",
    ]

    for t in TEMPLATE_LIST:
        for k in KERNELS:
            testName = "{}_{}x{}".format(t, k[0], k[1])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # The existing 2D operators don't support kernels with more than two
    # dimensions, so the 3D convolutions are expanded separately.
    for t in TEMPLATE_LIST_CONV3D:
        for k in KERNELS_3D:
            testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # Delete any templates after having created any dynamic ops.
    # This is a two-pass operation because it's bad practice to delete
    # keys from dictionaries while iterating.
    keyList = []
    for k in TF_OP_LIST:
        try:
            if TF_OP_LIST[k]["template"]:
                keyList.append(k)
                continue
        except KeyError:
            pass

    for k in keyList:
        del TF_OP_LIST[k]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--seed", dest="random_seed", default=42, type=int, help="Random seed"
    )
    parser.add_argument(
        "--random-shapes",
        dest="random_shapes",
        default=0,
        type=int,
        help=(
            "Use N random shapes of each rank for generating tests, "
            "seeded with the random seed"
        ),
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        dest="output_dir",
        default=".",
        type=str,
        help="Test output directory path prefix",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        default=False,
        action="store_true",
        help="Do not print test names",
    )
    parser.add_argument(
        "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
    )
    parser.add_argument(
        "-m",
        "--tflite-kernel-mode",
        dest="tflite_kernel_mode",
        type=str,
        choices=["reference", "optimized"],
        default="reference",
        help="TFLite interpreter kernel mode",
    )
    parser.add_argument(
        "--num-samples",
        dest="num_samples",
        default=200,
        type=int,
        help="Number of input samples for post-training quantization",
    )
    parser.add_argument(
        "--filter",
        dest="filter",
        default="",
        type=str,
        help="Filter test names by this expression",
    )
    args = parser.parse_args()

    # Turn the filter into a re object if present
    filter = None
    if args.filter != "":
        filter = re.compile(args.filter)

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    # Disable TF info messages
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    try:
        os.makedirs(args.output_dir)
    except FileExistsError:
        pass

    if args.random_shapes:
        gen_rand_shapes(args)

    # Build dynamic ops
    createDynamicOpLists()

    # Generate the test list and arguments to run_unit_test()
    unit_test_args = []

    for op in TF_OP_LIST:
        generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)

    errors = 0
    for t in unit_test_args:
        if not run_unit_test(*t):
            errors = errors + 1

    if not args.quiet:
        print("\nAll tasks done - with {} errors".format(errors))

    return 1 if errors else 0

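# Example invocation (illustrative only; flag names per the argparse setup
# above, and the script filename is a placeholder):
#   python <this_script>.py -o ./tests --filter "conv2d" --random-shapes 4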

if __name__ == "__main__":
    exit(main())