#!/usr/bin/env python3
# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# -------|------------------|------------------------------------
#  0     | DEBUG            | [Default] Print all messages
#  1     | INFO             | Filter out INFO messages
#  2     | WARNING          | Filter out INFO & WARNING messages
#  3     | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types': list of TensorFlow types that should be tested for this op
#            OR
#            a dictionary of {'framework_name': [type_list]} for cases where only
#            a subset of the types should be tested in each framework.  This can
#            also be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template':      boolean (indicates that this is a templated op which gets
#                    further processing in createDynamicOpLists)
#   'bias':          boolean indicating that there is a bias component to be generated
#   'qtypes':        List of QuantType quantized types to generate for this op
#   'rank':          tuple (lowest rank, highest rank). Dimension range of input tensor.
#   'custom_shapes': List of custom shapes for specific operators

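# As a quick illustration of the schema above, a hypothetical entry (purely a
# placeholder; "my_op" and TBuilder.MyOp are not real registrations) that tests
# float32 in both frameworks and adds int8 quantization only for TFLite would
# look like:
#
#   "my_op": {
#       "operands": (1, 0),
#       "build_fcn": (TBuilder.MyOp, TGen.tgBasic, ArgGen.agNone),
#       "types": {
#           "tf": TYPE_F,
#           "tflite": list(TYPE_F + [QuantType.ALL_I8]),
#       },
#   },
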
TF_OP_LIST = {
67 "add": {
68 "operands": (2, 0),
69 "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
70 "types": {
71 "tf": TYPE_FI,
72 "tflite": list(
73 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
74 ),
75 },
76 },
77 "sub": {
78 "operands": (2, 0),
79 "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
80 "types": {
81 "tf": TYPE_FI,
82 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
84 },
85 },
86 "mul": {
87 "operands": (2, 0),
88 "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
89 "types": {
90 "tf": TYPE_FI,
91 "tflite": list(
92 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
93 ),
94 },
95 },
96 "exp": {
97 "operands": (1, 0),
98 "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
99 "types": TYPE_F,
100 },
101 "rcp": {
102 "operands": (1, 0),
103 "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
104 "types": TYPE_F,
105 },
106 "relu": {
107 "operands": (1, 0),
108 "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
109 "types": {
110 "tf": TYPE_F,
111 "tflite": list(
112 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
113 ),
114 },
115 },
Jerry Ge93912432022-07-22 10:29:13 -0700116 "relu1": {
117 "operands": (1, 0),
118 "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
119 "types": {
120 "tf": [],
121 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
122 },
123 },
Jerry Ge2eea5bf2022-10-11 16:27:05 +0000124 "relu0To1": {
125 "operands": (1, 0),
126 "build_fcn": (TBuilder.Relu0To1, TGen.tgBasic, ArgGen.agNone),
127 "types": {
128 "tf": [],
129 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
130 },
131 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000132 "relu6": {
133 "operands": (1, 0),
134 "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
135 "types": {
136 "tf": TYPE_F,
137 "tflite": list(
138 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
139 ),
140 },
141 },
142 "leaky_relu": {
143 "operands": (1, 0),
144 "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
145 "types": {
146 "tf": TYPE_F,
147 "tflite": list(
148 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
149 ),
150 },
151 },
TatWai Chong41a04fe2022-11-03 21:44:32 +0000152 "prelu": {
153 "operands": (1, 0),
154 "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
155 "types": {
156 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
157 },
158 },
TatWai Chong473eb382022-08-02 04:21:30 +0000159 "gelu": {
160 "operands": (1, 0),
161 "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
162 "types": {
163 # Need compiler support for tf.Erf.
164 # "tf": TYPE_F,
165 "tflite": list(
166 # Only float32, int8 and uint8 supported currently
167 TYPE_F
168 + [QuantType.ALL_U8, QuantType.ALL_I8]
169 ),
170 },
171 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000172 "concat": {
173 "operands": (2, 0),
174 "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
175 "types": TYPE_FI,
176 },
177 "bitwise_and": {
178 "operands": (2, 0),
179 "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
180 "types": {"tf": TYPE_I}, # Not supported in TF Lite
181 },
182 "bitwise_or": {
183 "operands": (2, 0),
184 "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
185 "types": {"tf": TYPE_I}, # Not supported in TF Lite
186 },
187 "bitwise_not": {
188 "operands": (1, 0),
189 "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
190 "types": {"tf": TYPE_I}, # Not supported in TF Lite
191 },
192 "bitwise_xor": {
193 "operands": (2, 0),
194 "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
195 "types": {"tf": TYPE_I}, # Not supported in TF Lite
196 },
197 "logical_and": {
198 "operands": (2, 0),
199 "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
200 "types": TYPE_B,
201 },
202 "logical_or": {
203 "operands": (2, 0),
204 "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
205 "types": TYPE_B,
206 },
207 "logical_not": {
208 "operands": (1, 0),
209 "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
210 "types": TYPE_B,
211 },
212 "reduce_any": {
213 "operands": (1, 0),
214 "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
215 "types": TYPE_B,
216 },
217 "reduce_all": {
218 "operands": (1, 0),
219 "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
TatWai Chong6ad6d6e2022-11-11 15:28:50 -0800220 "types": TYPE_B,
Jeremy Johnson015c3552022-02-23 12:15:03 +0000221 },
222 "reduce_min": {
223 "operands": (1, 0),
224 "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
225 "types": {
226 "tf": TYPE_FI,
227 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
228 },
229 },
230 "reduce_max": {
231 "operands": (1, 0),
232 "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
233 "types": {
234 "tf": TYPE_FI,
235 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
236 },
237 },
238 "reduce_sum": {
239 "operands": (1, 0),
240 "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
241 "types": {
242 "tf": TYPE_F,
243 # v2 converter doesn't recognize quantized reduce_sum
244 # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
245 "tflite": TYPE_F,
246 },
247 },
248 "reduce_mean": {
249 "operands": (1, 0),
250 "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
251 "types": {
252 "tf": TYPE_F,
253 "tflite": list(
254 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
255 ),
256 },
257 },
258 "reduce_product": {
259 "operands": (1, 0),
260 "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
261 "types": TYPE_F,
262 },
263 "min": {
264 "operands": (2, 0),
265 "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
266 "types": TYPE_FI,
267 },
268 "max": {
269 "operands": (2, 0),
270 "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
271 "types": TYPE_FI,
272 },
273 "pow": {
274 "operands": (2, 0),
275 "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
276 # Technically, integer is supported, but only for positive exponents.
277 # Needs a random argument generator.
278 "types": TYPE_F,
279 },
280 "abs": {
281 "operands": (1, 0),
282 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
283 "types": TYPE_F,
284 },
285 "ceil": {
286 "operands": (1, 0),
287 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
288 "types": TYPE_F,
289 },
290 "floor": {
291 "operands": (1, 0),
292 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
293 "types": TYPE_F,
294 },
295 "log": {
296 "operands": (1, 0),
297 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
298 "types": TYPE_F,
299 },
300 "negate": {
301 "operands": (1, 0),
302 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
303 "types": TYPE_F,
304 },
305 "rsqrt": {
306 "operands": (1, 0),
Jerry Geb1f25012023-03-03 11:33:51 -0800307 "build_fcn": (TBuilder.Rsqrt, TGen.tgBasicPositive, ArgGen.agNone),
308 "types": {
309 "tf": TYPE_F,
310 "tflite": list(TYPE_F + [QuantType.ALL_I8]),
311 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000312 },
TatWai Chongd713a4d2022-11-10 13:54:28 -0800313 "sign": {
314 "operands": (1, 0),
315 "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
316 "types": {
317 "tf": TYPE_F,
318 },
319 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000320 "sigmoid": {
321 "operands": (1, 0),
322 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
323 "types": {
324 "tf": TYPE_F,
325 "tflite": list(
326 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
327 ),
328 },
329 },
330 "tanh": {
331 "operands": (1, 0),
332 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
333 "types": {
334 "tf": TYPE_F,
335 "tflite": list(
336 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
337 ),
338 },
339 },
Won Jeon78155c62023-06-10 00:20:04 +0000340 "erf": {
341 "operands": (1, 0),
342 "build_fcn": (TBuilder.Erf, TGen.tgBasic, ArgGen.agNone),
343 "types": {
344 "tf": TYPE_F,
345 },
346 },
Luke Hutton41601862022-12-06 17:29:15 +0000347 "sin": {
348 "operands": (1, 0),
349 "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
Luke Hutton657af862022-12-16 16:30:45 +0000350 "types": TYPE_F,
Luke Hutton41601862022-12-06 17:29:15 +0000351 },
352 "cos": {
353 "operands": (1, 0),
354 "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
Luke Hutton657af862022-12-16 16:30:45 +0000355 "types": TYPE_F,
Luke Hutton41601862022-12-06 17:29:15 +0000356 },
Luke Hutton2138a192022-12-15 11:01:39 +0000357 "atan2": {
358 "operands": (2, 0),
359 "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
360 "types": {
361 "tflite": TYPE_F,
362 },
363 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000364 "square": {
365 "operands": (1, 0),
366 "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
367 "types": TYPE_F,
368 },
369 "squared_difference": {
370 "operands": (2, 0),
371 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
Won Jeondd14c1b2023-06-29 23:20:00 +0000372 "types": {
373 "tf": TYPE_F,
374 "tflite": list(TYPE_FI + [QuantType.ALL_I8]),
375 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000376 },
377 "equal": {
378 "operands": (2, 0),
379 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
380 "types": TYPE_FI,
381 },
382 "greater_equal": {
383 "operands": (2, 0),
384 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
385 "types": TYPE_FI,
386 },
387 "greater": {
388 "operands": (2, 0),
389 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
390 "types": TYPE_FI,
391 },
392 "less": {
393 "operands": (2, 0),
394 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
395 "types": TYPE_FI,
396 },
397 "less_equal": {
398 "operands": (2, 0),
399 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
400 "types": TYPE_FI,
401 },
402 "conv2d_TEMPLATE": {
403 "operands": (1, 1),
404 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
405 "types": {
406 "tf": [tf.float32],
407 "tflite": [
408 tf.float32,
409 QuantType.CONV_U8_U8,
410 QuantType.CONV_I8_I8,
411 QuantType.CONV_I16_I8,
412 ],
413 },
414 "template": True,
415 },
416 "conv2d_relu_TEMPLATE": {
417 "operands": (1, 2),
418 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
419 "types": {
420 "tf": [tf.float32],
421 "tflite": [
422 tf.float32,
423 QuantType.CONV_U8_U8,
424 QuantType.CONV_I8_I8,
425 QuantType.CONV_I16_I8,
426 ],
427 },
428 "template": True,
429 },
430 "conv2d_relu6_TEMPLATE": {
431 "operands": (1, 2),
432 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
433 "types": {
434 "tf": [tf.float32],
435 "tflite": [
436 tf.float32,
437 QuantType.CONV_U8_U8,
438 QuantType.CONV_I8_I8,
439 QuantType.CONV_I16_I8,
440 ],
441 },
442 "template": True,
443 },
444 "conv2d_relu_n1_to_1_TEMPLATE": {
445 "operands": (1, 2),
446 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
447 "types": {
448 "tf": [tf.float32],
449 "tflite": [
450 tf.float32,
451 QuantType.CONV_U8_U8,
452 QuantType.CONV_I8_I8,
453 QuantType.CONV_I16_I8,
454 ],
455 },
456 "template": True,
457 },
    # This test is converted as:
    #   tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
461 "conv2d_tanh_TEMPLATE": {
462 "operands": (1, 2),
463 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
464 "types": {
465 "tf": [tf.float32],
466 "tflite": [
467 tf.float32,
468 QuantType.CONV_U8_U8,
469 QuantType.CONV_I8_I8,
470 QuantType.CONV_I16_I8,
471 ],
472 },
473 "template": True,
474 },
475 "conv2d_bias_TEMPLATE": {
476 "operands": (1, 2),
477 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
478 "types": {
479 "tf": [tf.float32],
480 "tflite": [
481 tf.float32,
482 QuantType.CONV_U8_U8,
483 QuantType.CONV_I8_I8,
484 QuantType.CONV_I16_I8,
485 ],
486 },
487 "bias": True,
488 "template": True,
489 },
TatWai Chongfd629052022-07-25 04:01:58 +0000490 "conv3d_TEMPLATE": {
491 "operands": (1, 1),
492 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
493 "types": {
494 "tf": [tf.float32],
495 "tflite": [
496 tf.float32,
497 QuantType.CONV_U8_U8,
498 QuantType.CONV_I8_I8,
499 # Quantization to 16x8-bit not yet supported by tflite.
500 ],
501 },
502 "template": True,
503 "rank": (1, 5),
504 },
505 "conv3d_bias_TEMPLATE": {
506 "operands": (1, 2),
507 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
508 "types": {
509 "tf": [tf.float32],
510 "tflite": [
511 tf.float32,
512 QuantType.CONV_U8_U8,
513 QuantType.CONV_I8_I8,
514 # Quantization to 16x8-bit not yet supported by tflite.
515 ],
516 },
517 "bias": True,
518 "template": True,
519 "rank": (1, 5),
520 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000521 "depthwise_conv2d_TEMPLATE": {
522 "operands": (1, 1),
523 "build_fcn": (
524 TBuilder.DepthwiseConv2d,
525 TGen.tgDepthwiseConv2d,
526 ArgGen.agDepthwiseConv2d,
527 ),
528 "types": {
529 "tf": [tf.float32],
530 "tflite": [
531 tf.float32,
532 QuantType.CONV_U8_U8,
533 QuantType.CONV_I8_I8,
534 QuantType.CONV_I16_I8,
535 ],
536 },
537 "template": True,
538 },
539 "depthwise_conv2d_bias_TEMPLATE": {
540 "operands": (1, 2),
541 "build_fcn": (
542 TBuilder.DepthwiseConv2dWithBias,
543 TGen.tgDepthwiseConv2d,
544 ArgGen.agDepthwiseConv2d,
545 ),
546 "types": {
547 "tf": [tf.float32],
548 "tflite": [
549 tf.float32,
550 QuantType.CONV_U8_U8,
551 QuantType.CONV_I8_I8,
552 QuantType.CONV_I16_I8,
553 ],
554 },
555 "bias": True,
556 "template": True,
557 },
558 "transpose_conv2d_TEMPLATE": {
559 "operands": (1, 1),
560 "build_fcn": (
561 TBuilder.TransposeConv2d,
562 TGen.tgTransposeConv2d,
563 ArgGen.agTransposeConv2d,
564 ),
565 "types": {
566 "tf": [tf.float32],
567 "tflite": [
568 tf.float32,
569 QuantType.CONV_U8_U8,
570 QuantType.CONV_I8_I8,
571 QuantType.CONV_I16_I8,
572 ],
573 },
574 "template": True,
575 },
576 "argmax": {
577 "operands": (1, 0),
578 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
579 "types": {"tf": TYPE_F},
580 },
581 "avg_pool2d": {
582 "operands": (1, 0),
583 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
584 "types": {
585 "tf": TYPE_F,
586 "tflite": list(
587 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
588 ),
589 },
590 },
591 "max_pool2d": {
592 "operands": (1, 0),
593 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
594 "types": {
595 "tf": TYPE_F,
596 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
597 # ALL_I16 not supported yet
598 # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
599 # QI16 is missing from MaxPoolOperandAndResultConstraints
            # If QI16 is added back, this test can run through.
601 },
602 },
603 "reshape": {
604 "operands": (1, 0),
605 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
606 "types": TYPE_FI,
607 },
608 "transpose": {
609 "operands": (1, 0),
610 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
611 "types": TYPE_FI,
612 },
613 "slice": {
614 "operands": (1, 0),
615 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
616 "types": TYPE_FI,
617 },
618 "strided_slice": {
619 "operands": (1, 0),
620 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
621 "types": TYPE_FI,
622 },
623 "select": {
624 "operands": (3, 0),
625 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
626 "types": TYPE_FI,
627 },
628 "addn": {
629 "operands": (4, 0),
630 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
631 "types": TYPE_FI,
632 },
633 "concatv2": {
634 "operands": (4, 0),
635 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
636 "types": TYPE_FI,
637 },
638 "stack": {
639 "operands": (4, 0),
640 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
641 "types": TYPE_FI,
642 },
643 "unstack": {
644 "operands": (1, 0),
645 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
646 "types": TYPE_F,
647 },
TatWai Chongf7008da2022-09-09 09:35:40 +0000648 "mirrorpad": {
649 "operands": (1, 0),
650 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
651 "types": TYPE_FI,
652 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000653 "pad": {
654 "operands": (1, 0),
655 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
TatWai Chong2226f902023-02-22 18:38:01 -0800656 "types": {
657 "tf": TYPE_F,
658 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
659 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000660 },
661 "expand_dims": {
662 "operands": (1, 0),
663 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
664 "types": TYPE_FI,
665 },
666 "shape": {
667 "operands": (1, 0),
668 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
669 "types": TYPE_FI,
670 },
671 "rank": {
672 "operands": (1, 0),
673 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
674 "types": TYPE_FI,
675 },
676 "fill": {
677 "operands": (1, 0),
678 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
679 "types": TYPE_FI,
680 },
681 "elu": {
682 "operands": (1, 0),
683 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
684 "types": TYPE_F,
685 },
686 "softmax": {
687 "operands": (1, 0),
688 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
689 "types": {
690 "tf": TYPE_F,
691 "tflite": list(
692 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
693 ),
694 },
695 },
696 "log_softmax": {
697 "operands": (1, 0),
698 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
699 "types": TYPE_F,
700 },
701 "matmul": {
702 "operands": (2, 0),
703 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
704 "types": {
705 "tf": TYPE_F,
706 "tflite": list(
707 TYPE_F
708 + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
710 ),
711 },
712 },
713 "add_scalar": {
714 "operands": (1, 0),
715 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
716 "types": TYPE_F,
717 },
718 "add_1d": {
719 "operands": (2, 0),
720 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
721 "types": TYPE_F,
722 },
723 "split": {
724 "operands": (1, 0),
725 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
726 "types": TYPE_FI,
727 },
728 "tile": {
729 "operands": (1, 0),
730 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
731 "types": TYPE_FI,
732 },
733 "reverse": {
734 "operands": (1, 0),
735 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
736 "types": {"tf": TYPE_FI},
737 },
738 "gather": {
739 "operands": (1, 0),
740 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
741 "types": TYPE_FI,
742 },
743 "gather_nd": {
744 "operands": (1, 0),
745 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
746 "types": TYPE_FI,
747 },
748 "scatter_nd": {
749 "operands": (1, 0),
750 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
751 "types": TYPE_FI,
752 },
753 "space_to_batch": {
754 "operands": (1, 0),
755 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
756 "types": TYPE_F,
757 },
758 "batch_to_space": {
759 "operands": (1, 0),
760 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
761 "types": TYPE_F,
762 },
763 "space_to_depth": {
764 "operands": (1, 0),
765 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
766 "types": TYPE_F,
767 },
768 "depth_to_space": {
769 "operands": (1, 0),
770 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
771 "types": TYPE_F,
772 },
773 "one_hot": {
774 "operands": (3, 1),
775 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
776 "types": TYPE_FI,
777 },
778 "fakequant": {
779 "operands": (1, 0),
780 "build_fcn": (
781 TBuilder.Fakequant,
782 TGen.tgBasic,
783 ArgGen.agFakequant,
784 ),
785 "types": {"tf": TYPE_F},
786 },
TatWai Chong0cef07e2023-02-27 13:22:52 -0800787 "resize": {
Jeremy Johnson015c3552022-02-23 12:15:03 +0000788 "operands": (1, 0),
TatWai Chong0cef07e2023-02-27 13:22:52 -0800789 "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
TatWai Chongf7326092022-06-08 12:17:14 -0700790 "types": {
791 "tf": TYPE_F,
792 "tflite": list(
793 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
794 ),
795 },
Jerry Ge5dd5a552023-05-23 22:41:20 +0000796 "custom_shapes": {
797 "custom_shape_only": False,
798 "shape_list": [(3, 1, 1, 7)],
799 },
TatWai Chongf7326092022-06-08 12:17:14 -0700800 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000801 "left_shift": {
802 "operands": (1, 0),
803 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
804 "types": {"tf": [tf.int32]},
805 },
806 "right_shift": {
807 "operands": (1, 0),
808 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
809 "types": {
810 "tf": [
811 tf.int32,
812 ]
813 },
814 },
Jerry Ge9e94af82022-10-27 09:57:00 -0700815 "while": {
816 "operands": (1, 0),
817 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
818 "types": {
819 "tflite": list(TYPE_F),
820 },
821 },
822 "lstm": {
823 "operands": (1, 0),
824 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
825 "types": {
826 "tflite": [
827 tf.float32,
828 # tf.int32
829 ]
830 },
831 },
832 "gru": {
833 "operands": (1, 0),
834 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
835 "types": {
836 "tflite": [
837 tf.float32,
838 # tf.int32
839 ]
840 },
841 },
842 "rnn": {
843 "operands": (1, 0),
844 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
845 "types": {
846 "tflite": [
847 tf.float32,
848 ]
849 },
850 },
Luke Hutton261b7b62023-01-10 14:50:31 +0000851 "rfft2d": {
852 "operands": (1, 0),
853 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
854 "types": {
855 "tflite": TYPE_F,
856 },
857 },
Luke Hutton714aa602023-02-08 19:45:26 +0000858 "real": {
859 "operands": (1, 0),
860 "build_fcn": (TBuilder.Real, TGen.tgComplexComponents, ArgGen.agNone),
861 "types": {
862 "tflite": [tf.complex64],
863 },
864 },
865 "imag": {
866 "operands": (1, 0),
867 "build_fcn": (TBuilder.Imag, TGen.tgComplexComponents, ArgGen.agNone),
868 "types": {
869 "tflite": [tf.complex64],
870 },
871 },
Tai Lyfe36fa92023-06-01 21:45:12 +0000872 "broadcastto": {
873 "operands": (1, 1),
874 "build_fcn": (TBuilder.BroadcastTo, TGen.tgBroadcastTo, ArgGen.agNone),
875 "types": {
876 "tf": TYPE_FIB,
877 },
878 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000879}
880
881# Shapes to be tested; default can be overwritten
882shape_list = [
883 (1,),
884 (64,),
885 (14, 19),
886 (13, 21, 3),
Luke Hutton261b7b62023-01-10 14:50:31 +0000887 (1, 8, 16),
Jeremy Johnson015c3552022-02-23 12:15:03 +0000888 (1, 4, 4, 4),
889 (1, 8, 4, 17),
890 (1, 4, 8, 19),
891 (1, 32, 32, 8),
892 (1, 7, 7, 9),
TatWai Chong0cef07e2023-02-27 13:22:52 -0800893 (3, 1, 1, 7),
TatWai Chongfd629052022-07-25 04:01:58 +0000894 (2, 2, 7, 7, 2),
895 (1, 4, 8, 21, 17),
896 (3, 32, 16, 16, 5),
Jeremy Johnson015c3552022-02-23 12:15:03 +0000897]
898
899
900def gen_rand_shapes(args):
901 """Overwrite the global shape list with a new list of random shapes"""
902 global shape_list
903
904 rng = np.random.default_rng(args.random_seed)
905
906 # Don't let things get too big... cap the maximum volume, but let
907 # an individual dimension be 1..47
908 max_total_volume = 32 * 32 * 4
909
910 shape_list = []
TatWai Chongfd629052022-07-25 04:01:58 +0000911 # Only iterate over ranks 2, 3, 4, and 5
912 for rank in range(2, 6):
Jeremy Johnson015c3552022-02-23 12:15:03 +0000913 for n in range(args.random_shapes):
914 new_shape = rng.integers(1, 48, size=rank)
915
TatWai Chongfd629052022-07-25 04:01:58 +0000916 # Set the batch dimension on 4D or 5D objects to 1
917 if rank == 4 or rank == 5:
Jeremy Johnson015c3552022-02-23 12:15:03 +0000918 new_shape[0] = 1
919
920 # Limit the total shape volume and throw out any
921 # shapes that wouldn't leave at least size=2 in some non-batch dimension
922 volume = 1
923 skip_shape = False
924 for i in range(rank):
925
926 volume *= new_shape[i]
927
928 # Reduce the shape, while it's larger than the maximum volume
929 while volume > max_total_volume:
930 new_shape[i] = new_shape[i] // 2
931 volume = volume // 2
932
933 # Now an untenable dimension size? Skip this one.
934 if new_shape[i] < 1:
935 skip_shape = True
936
937 if not skip_shape:
938 shape_list.append(tuple(new_shape))
939
940
# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to a .tflite flatbuffer if it is a quantized unit test
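# A successful run of run_unit_test() below leaves the following files in
# test_dir (derived from the code, for orientation): one .npy file per
# placeholder input, "model.pb" and/or "model.tflite" depending on the
# non-excluded frameworks (quantized tests always produce "model.tflite"),
# the reference outputs "tf_result.npy" / "tflite_result.npy", and a
# "test.json" descriptor written by write_test_json().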
def run_unit_test(
    op_name,
    args,
    test_dir,
    curr_shape,
    addl_args,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
):
955
956 try:
957 op = TF_OP_LIST[op_name]
958 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
959
960 # Get and seed a random number generator for this test
961 rng = np.random.default_rng(seed)
962
        # For broadcast fuzzing, `fuzzed` records the fuzzed index once fuzzing
        # has been done, so that later generator calls do not fuzz again.
964 fuzzed = []
965
Jeremy Johnson015c3552022-02-23 12:15:03 +0000966 # return placeholders=(str: name, np.array: value)
967 # consts=(str: name, np.array: value)
Won Jeone2325d12023-06-10 15:25:54 +0000968 placeholders, consts = (
969 tensor_gen_fcn(op, curr_shape, dtype, rng, fuzzed)
970 if tensor_gen_fcn.__name__ == "tgBFuzz"
971 else tensor_gen_fcn(op, curr_shape, dtype, rng)
972 )
Jeremy Johnson015c3552022-02-23 12:15:03 +0000973
        # if the test doesn't have any placeholders/consts, terminate early
975 if len(placeholders) == 0 and len(consts) == 0:
976 return True
977
978 if not args.quiet:
979 print(" {} ".format(test_dir))
980
981 try:
982 os.mkdir(test_dir)
983 except FileExistsError:
984 pass
985
986 const_nodes = [value for name, value in consts]
987
988 num_placeholders = len(placeholders)
        # if the test is quantized, create tensor quantization metadata for
        # each input tensor, based on the quantized type
991 if quantized_inference_dtype:
992 is_quantized = True
993 # TODO: support INT8 IFM x INT4 weight later
994 if quantized_inference_dtype == QuantType.ALL_U8:
995 qzero = [128] * num_placeholders
996 numpy_dtype = [np.uint8] * num_placeholders
997 tflite_inference_dtype = tf.uint8
998 elif quantized_inference_dtype == QuantType.ALL_I8:
999 qzero = [0] * num_placeholders
1000 numpy_dtype = [np.int8] * num_placeholders
1001 tflite_inference_dtype = tf.int8
1002 elif quantized_inference_dtype == QuantType.ALL_I16:
1003 qzero = [0] * num_placeholders
1004 numpy_dtype = [np.int16] * num_placeholders
1005 tflite_inference_dtype = tf.int16
1006 elif quantized_inference_dtype == QuantType.CONV_U8_U8:
1007 assert (
1008 num_placeholders == 1
1009 ), "Unsupported number of placeholders for Convolution: {}".format(
1010 num_placeholders
1011 )
1012 qzero = [128] * num_placeholders
1013 if num_placeholders == 2:
1014 numpy_dtype = [np.uint8, np.uint8]
1015 else:
1016 numpy_dtype = [np.uint8, np.uint8, np.int32]
1017 tflite_inference_dtype = tf.uint8
1018 elif quantized_inference_dtype == QuantType.CONV_I8_I8:
1019 assert (
1020 num_placeholders == 1
1021 ), "Unsupported number of placeholders for Convolution: {}".format(
1022 num_placeholders
1023 )
1024 qzero = [0] * num_placeholders
1025 if num_placeholders == 2:
1026 numpy_dtype = [np.int8, np.int8]
1027 else:
1028 numpy_dtype = [np.int8, np.int8, np.int32]
1029 tflite_inference_dtype = tf.int8
1030 elif quantized_inference_dtype == QuantType.CONV_I16_I8:
1031 assert (
1032 num_placeholders == 1
1033 ), "Unsupported number of placeholders for Convolution: {}".format(
1034 num_placeholders
1035 )
1036 if num_placeholders == 2:
1037 qzero = [0, 0]
1038 numpy_dtype = [np.int16, np.int8]
1039 else:
1040 qzero = [0, 0, 0]
1041 numpy_dtype = [
1042 np.int16,
1043 np.int8,
1044 np.int64,
1045 ] # np.int64 to represent 40 bits accumulator
1046 tflite_inference_dtype = tf.int16
1047 else:
1048 raise Exception(
1049 "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
1050 )
1051
1052 else:
1053 is_quantized = False
1054
1055 tf_model_filename = None
1056 tf_result_npy_filename = None
1057 tf_result_name = None
1058
1059 tflite_model_filename = None
1060 tflite_result_npy_filename = None
1061 tflite_result_name = None
1062
1063 placeholder_names = []
1064 placeholder_vals = []
1065 placeholder_signatures = ()
1066 placeholder_npy_filenames = []
1067 placeholder_shapes = []
1068
1069 for idx, (name, val) in enumerate(placeholders):
1070 placeholder_names.append(name)
1071 placeholder_signatures = placeholder_signatures + (
1072 tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
1073 )
1074 placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
1075 placeholder_shapes.append(val.shape)
1076
1077 # Get test builder class
1078 fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
1079 concrete_function = tf.function(input_signature=placeholder_signatures)(
1080 fcn_node.eval
1081 ).get_concrete_function()
1082
1083 if is_quantized:
1084
1085 assert dtype is tf.float32, "quantized test must come from float32 graph"
1086
            # 1. Quantize the float placeholder data to feed the graph
1088 for idx, (name, val) in enumerate(placeholders):
1089
1090 # we use np.amin()/np.amax() to determine dynamic range
1091 # for quantized test
1092 zeropoint = 0
1093 scale = 1.0
1094 if numpy_dtype[idx] != np.int64:
1095 qmin = np.iinfo(numpy_dtype[idx]).min
1096 qmax = np.iinfo(numpy_dtype[idx]).max
1097 num_bits = np.iinfo(numpy_dtype[idx]).bits
1098 # 40 bit is represented as np.int64
1099 else:
1100 num_bits = 40
1101 qmin = -(1 << num_bits)
1102 qmax = (1 << num_bits) - 1
1103
1104 min_val = np.amin(val)
1105 max_val = np.amax(val)
1106
1107 # for single value tensor, we set scale equal to the abs(value),
1108 # and fix zeropoint to 128
1109 # if val > 0, it'll be represented as 129,
1110 # where val = (129 - 128) * val
1111 # if val < 0, it'll be represented as 127,
1112 # where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
1114 # and let quantized 1 represent the value
1115 # also adjust effective min/max consequently
1116 if max_val == min_val:
1117 if max_val != 0:
1118 scale = abs(max_val)
1119 else:
1120 scale = 1.0
1121 min_val = float(qmin - qzero[idx]) * scale
1122 max_val = float(qmax - qzero[idx]) * scale
1123 else:
1124 scale = (max_val - min_val) / float(qmax - qmin)
                    zeropoint = int(round(-min_val / scale)) + qmin
1126
                # Skip this test if min_val > 0.0, in order to avoid the
                # assertion error from tf.quantization.fake_quant_with_min_max_args
                if min_val > 0.0:
1130 return True
Jeremy Johnson015c3552022-02-23 12:15:03 +00001131
                # run through tf.fakequant first to ensure the quantization error is aligned
1133 fakequant_val = tf.quantization.fake_quant_with_min_max_args(
1134 val,
1135 min=min_val,
1136 max=max_val,
1137 num_bits=num_bits,
1138 name="gen_quant_npy",
1139 )
1140
Jerry Ged69e2832023-07-05 21:54:07 +00001141 quant_val = np.round(fakequant_val / scale) + zeropoint
Jeremy Johnson015c3552022-02-23 12:15:03 +00001142
                # In a few unit tests after the May 2020 TF hash, this quantized
                # value exceeds the [qmin, qmax] range for some reason, so clip it
1145 saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])
1146
Jeremy Johnson015c3552022-02-23 12:15:03 +00001147 np.save(
1148 os.path.join(test_dir, placeholder_npy_filenames[idx]),
Jerry Ged69e2832023-07-05 21:54:07 +00001149 saved_val,
Jeremy Johnson015c3552022-02-23 12:15:03 +00001150 False,
1151 )
1152
1153 placeholder_vals.append(tf.convert_to_tensor(saved_val))
1154
1155 # 2. Convert the model to quantized TFLite flatbuffer
1156 module = tf.Module()
1157 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1158 [concrete_function], module
1159 )
1160 converter.optimizations = [tf.lite.Optimize.DEFAULT]
1161 converter.experimental_new_converter = True
1162
1163 # use MLIR-based post-quantizer
1164 converter.experimental_new_quantizer = True
1165
1166 flag = (
1167 tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 # noqa: E501
1168 )
1169 if tflite_inference_dtype == tf.int16:
1170 converter.target_spec.supported_ops = [flag]
1171
            # Generator function for the TFLiteConverter integer quantization.
            # It yields a few hundred input samples with the same order, type,
            # and shape as the inputs, to calibrate/estimate the range of the
            # floating-point inputs.
            # For broadcast fuzzing tests, fuzzing must be disabled; otherwise
            # it causes a mismatch between the tensor shapes of the inputs.
Jeremy Johnson015c3552022-02-23 12:15:03 +00001177 def input_stats():
1178 for i in range(0, args.num_samples):
Won Jeone2325d12023-06-10 15:25:54 +00001179 placeholders, _ = (
1180 tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng, fuzzed)
                        if tensor_gen_fcn.__name__ == "tgBFuzz"
1182 else tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng)
1183 )
1184 yield [s[1] for s in placeholders]
Jeremy Johnson015c3552022-02-23 12:15:03 +00001185
1186 converter.representative_dataset = input_stats
1187 converter.inference_input_type = tflite_inference_dtype
1188 converter.inference_output_type = tflite_inference_dtype
1189
1190 tflite_model = converter.convert()
1191
1192 tflite_model_filename = "model.tflite"
1193
1194 # Write out converted model to disk
1195 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1196 f.write(tflite_model)
1197
1198 else: # is_quantized is False
1199
            # 1. Save out the numpy arrays directly
1201 for idx, (name, val) in enumerate(placeholders):
1202 placeholder_vals.append(tf.convert_to_tensor(val))
Luke Hutton714aa602023-02-08 19:45:26 +00001203
                # Complex tensors are expected to be represented by a
                # single floating point tensor of shape [?, ..., ?, 2].
1206 if val.dtype == np.complex64:
1207 val_shape = val.shape + (2,)
1208 val = val.view(np.float32)
1209 val = val.reshape(val_shape)
1210
Jeremy Johnson015c3552022-02-23 12:15:03 +00001211 np.save(
1212 os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
1213 )
1214
            # 2.a Save out .pb if the framework list includes tensorflow
1216 if "tf" not in excluded_framework_list:
1217 # Write out graph as protobuf to disk
1218 tf_model_filename = "model.pb"
1219 tf.io.write_graph(
1220 concrete_function.graph, test_dir, tf_model_filename, True
1221 )
1222
            # 2.b Save out .tflite if the framework list includes tflite
1224 if "tflite" not in excluded_framework_list:
1225 # Convert the model to TFLite flatbuffer
1226 module = tf.Module()
1227 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1228 [concrete_function], module
1229 )
1230
1231 converter.experimental_new_converter = True
1232
                # Even if it's a non-quantized int32 test, this needs to be set to tf.float32
1234 converter.inference_input_type = tf.float32
1235 converter.inference_output_type = tf.float32
1236 tflite_model = converter.convert()
1237
1238 # Write out converted model to disk
1239 tflite_model_filename = "model.tflite"
1240 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1241 f.write(tflite_model)
1242
1243 # Get TF reference result if .pb is specified
1244 if tf_model_filename:
1245 tf_result_npy_filename = "tf_result.npy"
1246 tf_result = concrete_function(*placeholder_vals)
1247 np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)
1248
1249 tf_result_name = result_name
1250
1251 # Get TFLite inference result if .tflite is specified
1252 if tflite_model_filename:
1253 tflite_result_npy_filename = "tflite_result.npy"
1254
Luke Hutton5c844212023-01-27 14:17:52 +00001255 ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]
Jeremy Johnson015c3552022-02-23 12:15:03 +00001256
1257 if args.tflite_kernel_mode == "optimized" or (
1258 op_name in ops_with_optimized_only_kernel
1259 ):
1260 interpreter = tf.lite.Interpreter(
1261 model_path=os.path.join(test_dir, tflite_model_filename)
1262 )
1263 elif args.tflite_kernel_mode == "reference":
1264 interpreter = tf.lite.Interpreter(
1265 model_path=os.path.join(test_dir, tflite_model_filename),
1266 experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
1267 )
1268 else:
1269 assert 0, "unknown tflite interpreter mode {}".format(
1270 args.tflite_kernel_mode
1271 )
1272 interpreter.allocate_tensors()
1273
1274 input_details = interpreter.get_input_details()
1275 output_details = interpreter.get_output_details()
1276
1277 assert len(input_details) == len(
1278 placeholder_vals
1279 ), "number of placeholder mismatch"
1280
1281 for idx, val in enumerate(placeholder_vals):
1282 interpreter.set_tensor(input_details[idx]["index"], val.numpy())
1283
1284 interpreter.invoke()
1285 tflite_result = interpreter.get_tensor(output_details[0]["index"])
1286
1287 np.save(
1288 os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
1289 )
1290
            # The result tensor name changes after converting to the TFLite
            # flatbuffer, so overwrite the information with what the TFLite
            # model reports directly. Assume a single result tensor for now.
1294 tflite_result_name = output_details[0]["name"]
1295
1296 # Write out test descriptor
1297 write_test_json(
1298 filename=os.path.join(test_dir, "test.json"),
1299 tf_model_filename=tf_model_filename,
1300 tf_result_npy_filename=tf_result_npy_filename,
1301 tf_result_name=tf_result_name,
1302 tflite_model_filename=tflite_model_filename,
1303 tflite_result_npy_filename=tflite_result_npy_filename,
1304 tflite_result_name=tflite_result_name,
1305 ifm_name=placeholder_names,
1306 ifm_file=placeholder_npy_filenames,
1307 ifm_shape=placeholder_shapes,
1308 framework_exclusions=excluded_framework_list,
1309 quantized=is_quantized,
1310 )
1311 except Exception as e:
1312 msg = "Error running task: {}".format(e)
1313 print(msg)
1314 print(
1315 "".join(
1316 traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
1317 )
1318 )
1319 return False
1320 return True
1321
1322
1323def build_const_net(
1324 args,
1325 curr_shape,
1326 op_name,
1327 dtype,
1328 excluded_framework_list,
1329 quantized_inference_dtype,
1330 result_name,
1331 seed,
1332 rng,
1333 filter,
1334 unit_test_args,
1335):
1336
1337 if quantized_inference_dtype:
1338 quant_dtype = get_tf_dtype(quantized_inference_dtype)
1339 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
1340 else:
1341 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
1342 test_dir = os.path.join(args.output_dir, test_dir)
1343
1344 # If the operator has an additional function to generate arguments, call it
1345 # here and iterate through the argument list that it generates
1346 op = TF_OP_LIST[op_name]
1347 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1348
TatWai Chongfd629052022-07-25 04:01:58 +00001349 try:
1350 rank_lo, rank_hi = op["rank"]
1351 except KeyError:
        # Set the testing rank range to (1, 4) by default.
1353 rank_lo = 1
1354 rank_hi = 4
1355
1356 if len(curr_shape) not in range(rank_lo, rank_hi + 1):
1357 return
1358
Jeremy Johnson015c3552022-02-23 12:15:03 +00001359 addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
1360 for desc, addl_args in addl_args_tuple:
Jeremy Johnson0e6218e2022-05-05 17:08:04 +01001361 # Only filter on the full test_name, not the output directory
1362 _, test_name = os.path.split(test_dir + desc)
1363 if not filter or filter.search(test_name):
Jeremy Johnson015c3552022-02-23 12:15:03 +00001364 unit_test_args.append(
1365 [
1366 op_name,
1367 args,
1368 test_dir + desc,
1369 curr_shape,
1370 addl_args,
1371 dtype,
1372 excluded_framework_list,
1373 quantized_inference_dtype,
1374 result_name,
1375 seed,
1376 ]
1377 )
1378
1379
# Python's built-in hash() is not reproducible across runs, so create our own hash for this purpose
1381def op_name_hash(op_name):
1382 result = 0xDEADBEEF
1383 for ch in op_name:
1384 if result & 1:
1385 result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
1386 else:
1387 result = (ord(ch) << 24) ^ (result >> 1)
1388
1389 return result
1390
1391
1392def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):
1393
1394 if not args.quiet:
1395 print(
1396 "Generating tests for {} ".format(
1397 op_name
1398 )
1399 )
1400
1401 op = TF_OP_LIST[op_name]
1402
1403 # Seed the RNG so that we get the same random tests for each test each time
1404 # If the number of tests for a given generation function changes, the tests
1405 # for that operator may also change accordingly, but this will at least keep
1406 # down churn across operators.
1407
1408 bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
1409 np.int32
1410 ).max
1411 rng = np.random.default_rng(bounded_hash_val)
1412
    # op["types"] is either a list of data types, or a dictionary with "tf"
    # and/or "tflite" as keys and the data types to test under each framework
    # as values
1415
1416 if isinstance(op["types"], dict):
1417 try:
1418 tf_dtypes = op["types"]["tf"]
1419 except KeyError:
1420 tf_dtypes = []
1421 try:
1422 tflite_dtypes = op["types"]["tflite"]
1423 except KeyError:
1424 tflite_dtypes = []
1425 elif isinstance(op["types"], list):
1426 tf_dtypes = op["types"]
1427 tflite_dtypes = op["types"]
1428
1429 tf_nonquantized_dtypes = tf_dtypes # tf doesn't support quantized data types
1430 tflite_quantized_dtypes = []
1431 tflite_nonquantized_dtypes = []
1432 for dtype in tflite_dtypes:
1433 if isinstance(dtype, QuantType):
1434 tflite_quantized_dtypes.append(dtype)
1435 else:
1436 tflite_nonquantized_dtypes.append(dtype)
1437
1438 nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
1439 set(tflite_nonquantized_dtypes)
1440 )
1441 nonquantized_dtypes = list(nonquantized_dtypes_set)
1442 quantized_dtypes = tflite_quantized_dtypes
1443
Jerry Ge5dd5a552023-05-23 22:41:20 +00001444 # append custom_shapes or replace shape_list with custom_shapes
1445 try:
1446 custom_shapes = op["custom_shapes"]
1447 if custom_shapes["custom_shape_only"]:
1448 shape_list = custom_shapes["shape_list"]
1449 else:
Jerry Geabdac232023-06-12 16:27:16 +00001450 shape_list = shape_list.copy()
            shape_list.extend(custom_shapes["shape_list"])
Jerry Ge5dd5a552023-05-23 22:41:20 +00001452 except KeyError:
1453 pass
1454
Jeremy Johnson015c3552022-02-23 12:15:03 +00001455 # populate non quantized unit test arguments
1456 for dtype in nonquantized_dtypes:
1457
1458 excluded_framework_set = set(ALL_FRAMEWORKS)
1459 if dtype in tf_nonquantized_dtypes:
1460 excluded_framework_set.remove("tf")
1461 if dtype in tflite_nonquantized_dtypes:
1462 excluded_framework_set.remove("tflite")
1463 excluded_framework_list = list(excluded_framework_set)
1464
1465 for curr_shape in shape_list:
1466 build_const_net(
1467 args,
1468 curr_shape,
1469 op_name,
1470 dtype,
1471 excluded_framework_list,
1472 None,
1473 result_name,
1474 bounded_hash_val,
1475 rng,
1476 filter,
1477 unit_test_args,
1478 )
1479
1480 # populate quantized unit test arguments
1481 # must exclude 'tf' and source dtype being tf.float32
1482 for dtype in quantized_dtypes:
1483 for curr_shape in shape_list:
1484 build_const_net(
1485 args,
1486 curr_shape,
1487 op_name,
1488 tf.float32,
1489 ["tf"],
1490 dtype,
1491 result_name,
1492 bounded_hash_val,
1493 rng,
1494 filter,
1495 unit_test_args,
1496 )
1497
1498 return unit_test_args
1499
1500
1501def createDynamicOpLists():
1502 """The templated operators are conv2d-style operators with a number of kernel
1503 sizes. Since the operator is unchanged, we generate the range of kernel
1504 sizes here in this loop and remove the original templates from the list.
1505
1506 This could be expanded to non-conv2d-style operators in the future."""
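    # For example, "conv2d_TEMPLATE" expands here into "conv2d_1x1",
    # "conv2d_3x3" and "conv2d_5x5" entries (and similarly for the other
    # templates), after which the original *_TEMPLATE entries are deleted.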
1507
1508 # Dynamically create op lists for convolutions with a list of kernel sizes
1509 KERNELS = [
1510 [1, 1],
1511 [3, 3],
1512 [5, 5],
1513 ]
1514
TatWai Chongfd629052022-07-25 04:01:58 +00001515 # dim = [D, H, W]
1516 KERNELS_3D = [
1517 [1, 1, 1],
1518 [2, 3, 3],
1519 [3, 5, 5],
1520 ]
1521
Jeremy Johnson015c3552022-02-23 12:15:03 +00001522 TEMPLATE_LIST = [
1523 "conv2d",
1524 "conv2d_bias",
1525 "conv2d_relu",
1526 "conv2d_relu6",
1527 "conv2d_relu_n1_to_1",
1528 "conv2d_tanh",
1529 "depthwise_conv2d",
1530 "depthwise_conv2d_bias",
1531 "transpose_conv2d",
1532 ]
1533
TatWai Chongfd629052022-07-25 04:01:58 +00001534 TEMPLATE_LIST_CONV3D = [
1535 "conv3d",
1536 "conv3d_bias",
1537 ]
1538
Jeremy Johnson015c3552022-02-23 12:15:03 +00001539 for t in TEMPLATE_LIST:
1540 for k in KERNELS:
1541 testName = "{}_{}x{}".format(t, k[0], k[1])
1542 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1543 TF_OP_LIST[testName]["filter"] = k
1544 TF_OP_LIST[testName]["template"] = False
1545
    # The 2D operators above only support 2D kernels; the conv3d templates use their own 3D kernel sizes.
1547 for t in TEMPLATE_LIST_CONV3D:
1548 for k in KERNELS_3D:
1549 testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
1550 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1551 TF_OP_LIST[testName]["filter"] = k
1552 TF_OP_LIST[testName]["template"] = False
1553
Jeremy Johnson015c3552022-02-23 12:15:03 +00001554 # Delete any templates after having created any dynamic ops
1555 # This is a two-pass operation because it's bad practice to delete
1556 # keys from dictionaries while iterating
1557 keyList = []
1558 for k in TF_OP_LIST:
1559 try:
1560 if TF_OP_LIST[k]["template"]:
1561 keyList.append(k)
1562 continue
1563 except KeyError:
1564 pass
1565
1566 for k in keyList:
1567 del TF_OP_LIST[k]
1568
1569
1570def main():
1571 parser = argparse.ArgumentParser()
1572 parser.add_argument(
1573 "--seed", dest="random_seed", default=42, type=int, help="Random seed"
1574 )
1575 parser.add_argument(
1576 "--random-shapes",
1577 dest="random_shapes",
1578 default=0,
1579 type=int,
1580 help=(
1581 "Use N random shapes of each rank for generating tests,"
1582 "seeded with random seed"
1583 ),
1584 )
1585 parser.add_argument(
1586 "-o",
1587 "--output-dir",
1588 dest="output_dir",
1589 default=".",
1590 type=str,
1591 help="Test output directory path prefix",
1592 )
1593 parser.add_argument(
1594 "-q",
1595 "--quiet",
1596 dest="quiet",
1597 default=False,
1598 action="store_true",
1599 help="Do not print test names",
1600 )
1601 parser.add_argument(
1602 "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
1603 )
1604 parser.add_argument(
1605 "-m",
1606 "--tflite-kernel-mode",
1607 dest="tflite_kernel_mode",
1608 type=str,
1609 choices=["reference", "optimized"],
1610 default="reference",
1611 help="TFLite interpreter kernel mode",
1612 )
1613 parser.add_argument(
1614 "--num-samples",
1615 dest="num_samples",
1616 default=200,
1617 type=int,
1618 help="Number of input samples for post-training quantization",
1619 )
1620 parser.add_argument(
1621 "--filter",
1622 dest="filter",
1623 default="",
1624 type=str,
1625 help="Filter test names by this expression",
1626 )
1627 args = parser.parse_args()
1628
1629 # Turn the filter into a re object if present
1630 filter = None
1631 if args.filter != "":
1632 filter = re.compile(args.filter)
1633
1634 # Autodetect CPU count
1635 if args.jobs <= 0:
1636 args.jobs = os.cpu_count()
1637
1638 # Disable TF info messages
1639 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
1640
1641 try:
1642 os.makedirs(args.output_dir)
1643 except FileExistsError:
1644 pass
1645
1646 if args.random_shapes:
1647 gen_rand_shapes(args)
1648
1649 # Build dynamic ops
1650 createDynamicOpLists()
1651
1652 # Generate the test list and arguments to run_unit_test()
1653 unit_test_args = []
1654
1655 for op in TF_OP_LIST:
1656 generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)
1657
1658 errors = 0
1659 for t in unit_test_args:
1660 if not run_unit_test(*t):
1661 errors = errors + 1
1662
1663 if not args.quiet:
1664 print("\nAll tasks done - with {} errors".format(errors))
1665
1666 return 1 if errors else 0
1667
1668
if __name__ == "__main__":
    exit(main())
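
# Example invocation (a sketch; the script filename is whatever this file is
# saved as, and the filter regex is only illustrative):
#   python3 <this_script>.py --seed 42 --random-shapes 2 --filter "add" -o ./frameworks_tests
# This generates the matching TF/TFLite unit tests under ./frameworks_tests,
# one directory per test, each containing the model, the input .npy files,
# the reference result and a test.json descriptor.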