#!/usr/bin/env python3
# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
4import argparse
5import os
6import re
7import traceback
8
9import numpy as np
10
#  Level | Level for Humans | Level Description
# -------|------------------|------------------------------------
#  0     | DEBUG            | [Default] Print all messages
#  1     | INFO             | Filter out INFO messages
#  2     | WARNING          | Filter out INFO & WARNING messages
#  3     | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages; only errors are shown
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
19
# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402
32
33# All of the supported frameworks
34ALL_FRAMEWORKS = ["tf", "tflite"]
35
36# Lists of different data types
37TYPE_F = [tf.float32]
38TYPE_I = [tf.int32]
39TYPE_FI = [tf.float32, tf.int32]
40TYPE_B = [tf.bool]
41TYPE_FIB = [tf.float32, tf.int32, tf.bool]
42TYPE_H = [tf.float16]
43TYPE_FH = [tf.float32, tf.float16]
44TYPE_FHI = [tf.float32, tf.float16, tf.int32]
45TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]
46
# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types': list of TensorFlow types that should be tested for this op
#            OR
#            a dictionary of {'framework_name': [type_list]} for cases where only
#            a subset of the types should be tested in each framework.  This can
#            also be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template':      boolean (indicates that this is a templated op which gets further
#                    processing in createDynamicOpLists)
#   'bias':          boolean indicating that there is a bias component to be generated
#   'qtypes':        list of QuantType quantized types to generate for this op
#   'rank':          tuple (lowest rank, highest rank). Dimension range of input tensor.
#   'custom_shapes': dictionary with a 'custom_shape_only' flag and a 'shape_list'
#                    of custom shapes for specific operators
Jeremy Johnson015c3552022-02-23 12:15:03 +000065
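# For illustration only: a minimal hypothetical entry using the required members
# described above.  The builder name (TBuilder.MyOp) is an assumption and does not
# exist in this suite; a real entry must reference an implemented builder, tensor
# generator and argument generator.
#
#   "my_op": {
#       "operands": (1, 0),
#       "build_fcn": (TBuilder.MyOp, TGen.tgBasic, ArgGen.agNone),
#       "types": TYPE_F,
#   },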
66TF_OP_LIST = {
67 "add": {
68 "operands": (2, 0),
69 "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
70 "types": {
71 "tf": TYPE_FI,
72 "tflite": list(
73 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
74 ),
75 },
76 },
77 "sub": {
78 "operands": (2, 0),
79 "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
80 "types": {
81 "tf": TYPE_FI,
82 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
84 },
85 },
86 "mul": {
87 "operands": (2, 0),
88 "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
89 "types": {
90 "tf": TYPE_FI,
91 "tflite": list(
92 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
93 ),
94 },
95 },
96 "exp": {
97 "operands": (1, 0),
98 "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
99 "types": TYPE_F,
100 },
101 "rcp": {
102 "operands": (1, 0),
103 "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
104 "types": TYPE_F,
105 },
106 "relu": {
107 "operands": (1, 0),
108 "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
109 "types": {
110 "tf": TYPE_F,
111 "tflite": list(
112 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
113 ),
114 },
115 },
Jerry Ge93912432022-07-22 10:29:13 -0700116 "relu1": {
117 "operands": (1, 0),
118 "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
119 "types": {
120 "tf": [],
121 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
122 },
123 },
Jerry Ge2eea5bf2022-10-11 16:27:05 +0000124 "relu0To1": {
125 "operands": (1, 0),
126 "build_fcn": (TBuilder.Relu0To1, TGen.tgBasic, ArgGen.agNone),
127 "types": {
128 "tf": [],
129 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
130 },
131 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000132 "relu6": {
133 "operands": (1, 0),
134 "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
135 "types": {
136 "tf": TYPE_F,
137 "tflite": list(
138 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
139 ),
140 },
141 },
142 "leaky_relu": {
143 "operands": (1, 0),
144 "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
145 "types": {
146 "tf": TYPE_F,
147 "tflite": list(
148 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
149 ),
150 },
151 },
TatWai Chong41a04fe2022-11-03 21:44:32 +0000152 "prelu": {
153 "operands": (1, 0),
154 "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
155 "types": {
156 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
157 },
158 },
TatWai Chong473eb382022-08-02 04:21:30 +0000159 "gelu": {
160 "operands": (1, 0),
161 "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
162 "types": {
163 # Need compiler support for tf.Erf.
164 # "tf": TYPE_F,
165 "tflite": list(
166 # Only float32, int8 and uint8 supported currently
167 TYPE_F
168 + [QuantType.ALL_U8, QuantType.ALL_I8]
169 ),
170 },
171 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000172 "concat": {
173 "operands": (2, 0),
174 "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
175 "types": TYPE_FI,
176 },
177 "bitwise_and": {
178 "operands": (2, 0),
179 "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
180 "types": {"tf": TYPE_I}, # Not supported in TF Lite
181 },
182 "bitwise_or": {
183 "operands": (2, 0),
184 "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
185 "types": {"tf": TYPE_I}, # Not supported in TF Lite
186 },
187 "bitwise_not": {
188 "operands": (1, 0),
189 "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
190 "types": {"tf": TYPE_I}, # Not supported in TF Lite
191 },
192 "bitwise_xor": {
193 "operands": (2, 0),
194 "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
195 "types": {"tf": TYPE_I}, # Not supported in TF Lite
196 },
197 "logical_and": {
198 "operands": (2, 0),
199 "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
200 "types": TYPE_B,
201 },
202 "logical_or": {
203 "operands": (2, 0),
204 "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
205 "types": TYPE_B,
206 },
207 "logical_not": {
208 "operands": (1, 0),
209 "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
210 "types": TYPE_B,
211 },
212 "reduce_any": {
213 "operands": (1, 0),
214 "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
215 "types": TYPE_B,
216 },
217 "reduce_all": {
218 "operands": (1, 0),
219 "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
TatWai Chong6ad6d6e2022-11-11 15:28:50 -0800220 "types": TYPE_B,
Jeremy Johnson015c3552022-02-23 12:15:03 +0000221 },
222 "reduce_min": {
223 "operands": (1, 0),
224 "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
225 "types": {
226 "tf": TYPE_FI,
227 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
228 },
229 },
230 "reduce_max": {
231 "operands": (1, 0),
232 "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
233 "types": {
234 "tf": TYPE_FI,
235 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
236 },
237 },
238 "reduce_sum": {
239 "operands": (1, 0),
240 "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
241 "types": {
242 "tf": TYPE_F,
243 # v2 converter doesn't recognize quantized reduce_sum
244 # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
245 "tflite": TYPE_F,
246 },
247 },
248 "reduce_mean": {
249 "operands": (1, 0),
250 "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
251 "types": {
252 "tf": TYPE_F,
253 "tflite": list(
254 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
255 ),
256 },
257 },
258 "reduce_product": {
259 "operands": (1, 0),
260 "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
261 "types": TYPE_F,
262 },
263 "min": {
264 "operands": (2, 0),
265 "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
266 "types": TYPE_FI,
267 },
268 "max": {
269 "operands": (2, 0),
270 "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
271 "types": TYPE_FI,
272 },
273 "pow": {
274 "operands": (2, 0),
275 "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
276 # Technically, integer is supported, but only for positive exponents.
277 # Needs a random argument generator.
278 "types": TYPE_F,
279 },
280 "abs": {
281 "operands": (1, 0),
282 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
283 "types": TYPE_F,
284 },
285 "ceil": {
286 "operands": (1, 0),
287 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
288 "types": TYPE_F,
289 },
290 "floor": {
291 "operands": (1, 0),
292 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
293 "types": TYPE_F,
294 },
295 "log": {
296 "operands": (1, 0),
297 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
298 "types": TYPE_F,
299 },
300 "negate": {
301 "operands": (1, 0),
302 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
303 "types": TYPE_F,
304 },
305 "rsqrt": {
306 "operands": (1, 0),
Jerry Geb1f25012023-03-03 11:33:51 -0800307 "build_fcn": (TBuilder.Rsqrt, TGen.tgBasicPositive, ArgGen.agNone),
308 "types": {
309 "tf": TYPE_F,
310 "tflite": list(TYPE_F + [QuantType.ALL_I8]),
311 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000312 },
TatWai Chongd713a4d2022-11-10 13:54:28 -0800313 "sign": {
314 "operands": (1, 0),
315 "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
316 "types": {
317 "tf": TYPE_F,
318 },
319 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000320 "sigmoid": {
321 "operands": (1, 0),
322 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
323 "types": {
324 "tf": TYPE_F,
325 "tflite": list(
326 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
327 ),
328 },
329 },
330 "tanh": {
331 "operands": (1, 0),
332 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
333 "types": {
334 "tf": TYPE_F,
335 "tflite": list(
336 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
337 ),
338 },
339 },
Won Jeon78155c62023-06-10 00:20:04 +0000340 "erf": {
341 "operands": (1, 0),
342 "build_fcn": (TBuilder.Erf, TGen.tgBasic, ArgGen.agNone),
343 "types": {
344 "tf": TYPE_F,
345 },
346 },
    "sin": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "cos": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "atan2": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": TYPE_F,
        },
    },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000364 "square": {
365 "operands": (1, 0),
366 "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
367 "types": TYPE_F,
368 },
369 "squared_difference": {
370 "operands": (2, 0),
371 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
Won Jeondd14c1b2023-06-29 23:20:00 +0000372 "types": {
373 "tf": TYPE_F,
374 "tflite": list(TYPE_FI + [QuantType.ALL_I8]),
375 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000376 },
377 "equal": {
378 "operands": (2, 0),
379 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
380 "types": TYPE_FI,
381 },
382 "greater_equal": {
383 "operands": (2, 0),
384 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
385 "types": TYPE_FI,
386 },
387 "greater": {
388 "operands": (2, 0),
389 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
390 "types": TYPE_FI,
391 },
392 "less": {
393 "operands": (2, 0),
394 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
395 "types": TYPE_FI,
396 },
397 "less_equal": {
398 "operands": (2, 0),
399 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
400 "types": TYPE_FI,
401 },
402 "conv2d_TEMPLATE": {
403 "operands": (1, 1),
404 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
405 "types": {
406 "tf": [tf.float32],
407 "tflite": [
408 tf.float32,
409 QuantType.CONV_U8_U8,
410 QuantType.CONV_I8_I8,
411 QuantType.CONV_I16_I8,
412 ],
413 },
414 "template": True,
415 },
416 "conv2d_relu_TEMPLATE": {
417 "operands": (1, 2),
418 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
419 "types": {
420 "tf": [tf.float32],
421 "tflite": [
422 tf.float32,
423 QuantType.CONV_U8_U8,
424 QuantType.CONV_I8_I8,
425 QuantType.CONV_I16_I8,
426 ],
427 },
428 "template": True,
429 },
430 "conv2d_relu6_TEMPLATE": {
431 "operands": (1, 2),
432 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
433 "types": {
434 "tf": [tf.float32],
435 "tflite": [
436 tf.float32,
437 QuantType.CONV_U8_U8,
438 QuantType.CONV_I8_I8,
439 QuantType.CONV_I16_I8,
440 ],
441 },
442 "template": True,
443 },
444 "conv2d_relu_n1_to_1_TEMPLATE": {
445 "operands": (1, 2),
446 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
447 "types": {
448 "tf": [tf.float32],
449 "tflite": [
450 tf.float32,
451 QuantType.CONV_U8_U8,
452 QuantType.CONV_I8_I8,
453 QuantType.CONV_I16_I8,
454 ],
455 },
456 "template": True,
457 },
    # This test is converted as:
    #   tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
461 "conv2d_tanh_TEMPLATE": {
462 "operands": (1, 2),
463 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
464 "types": {
465 "tf": [tf.float32],
466 "tflite": [
467 tf.float32,
468 QuantType.CONV_U8_U8,
469 QuantType.CONV_I8_I8,
470 QuantType.CONV_I16_I8,
471 ],
472 },
473 "template": True,
474 },
475 "conv2d_bias_TEMPLATE": {
476 "operands": (1, 2),
477 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
478 "types": {
479 "tf": [tf.float32],
480 "tflite": [
481 tf.float32,
482 QuantType.CONV_U8_U8,
483 QuantType.CONV_I8_I8,
484 QuantType.CONV_I16_I8,
485 ],
486 },
487 "bias": True,
488 "template": True,
489 },
TatWai Chongfd629052022-07-25 04:01:58 +0000490 "conv3d_TEMPLATE": {
491 "operands": (1, 1),
492 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
493 "types": {
494 "tf": [tf.float32],
495 "tflite": [
496 tf.float32,
497 QuantType.CONV_U8_U8,
498 QuantType.CONV_I8_I8,
499 # Quantization to 16x8-bit not yet supported by tflite.
500 ],
501 },
502 "template": True,
503 "rank": (1, 5),
504 },
505 "conv3d_bias_TEMPLATE": {
506 "operands": (1, 2),
507 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
508 "types": {
509 "tf": [tf.float32],
510 "tflite": [
511 tf.float32,
512 QuantType.CONV_U8_U8,
513 QuantType.CONV_I8_I8,
514 # Quantization to 16x8-bit not yet supported by tflite.
515 ],
516 },
517 "bias": True,
518 "template": True,
519 "rank": (1, 5),
520 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000521 "depthwise_conv2d_TEMPLATE": {
522 "operands": (1, 1),
523 "build_fcn": (
524 TBuilder.DepthwiseConv2d,
525 TGen.tgDepthwiseConv2d,
526 ArgGen.agDepthwiseConv2d,
527 ),
528 "types": {
529 "tf": [tf.float32],
530 "tflite": [
531 tf.float32,
532 QuantType.CONV_U8_U8,
533 QuantType.CONV_I8_I8,
534 QuantType.CONV_I16_I8,
535 ],
536 },
537 "template": True,
538 },
539 "depthwise_conv2d_bias_TEMPLATE": {
540 "operands": (1, 2),
541 "build_fcn": (
542 TBuilder.DepthwiseConv2dWithBias,
543 TGen.tgDepthwiseConv2d,
544 ArgGen.agDepthwiseConv2d,
545 ),
546 "types": {
547 "tf": [tf.float32],
548 "tflite": [
549 tf.float32,
550 QuantType.CONV_U8_U8,
551 QuantType.CONV_I8_I8,
552 QuantType.CONV_I16_I8,
553 ],
554 },
555 "bias": True,
556 "template": True,
557 },
558 "transpose_conv2d_TEMPLATE": {
559 "operands": (1, 1),
560 "build_fcn": (
561 TBuilder.TransposeConv2d,
562 TGen.tgTransposeConv2d,
563 ArgGen.agTransposeConv2d,
564 ),
565 "types": {
566 "tf": [tf.float32],
567 "tflite": [
568 tf.float32,
569 QuantType.CONV_U8_U8,
570 QuantType.CONV_I8_I8,
571 QuantType.CONV_I16_I8,
572 ],
573 },
574 "template": True,
575 },
576 "argmax": {
577 "operands": (1, 0),
578 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
579 "types": {"tf": TYPE_F},
580 },
581 "avg_pool2d": {
582 "operands": (1, 0),
583 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
584 "types": {
585 "tf": TYPE_F,
586 "tflite": list(
587 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
588 ),
589 },
590 },
591 "max_pool2d": {
592 "operands": (1, 0),
593 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
594 "types": {
595 "tf": TYPE_F,
596 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # ALL_I16 not supported yet
            # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
            # QI16 is missing from MaxPoolOperandAndResultConstraints.
            # If QI16 is added back, this test can run.
601 },
602 },
603 "reshape": {
604 "operands": (1, 0),
605 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
606 "types": TYPE_FI,
607 },
608 "transpose": {
609 "operands": (1, 0),
610 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
611 "types": TYPE_FI,
612 },
613 "slice": {
614 "operands": (1, 0),
615 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
616 "types": TYPE_FI,
617 },
618 "strided_slice": {
619 "operands": (1, 0),
620 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
621 "types": TYPE_FI,
622 },
623 "select": {
624 "operands": (3, 0),
625 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
626 "types": TYPE_FI,
627 },
628 "addn": {
629 "operands": (4, 0),
630 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
631 "types": TYPE_FI,
632 },
633 "concatv2": {
634 "operands": (4, 0),
635 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
636 "types": TYPE_FI,
637 },
638 "stack": {
639 "operands": (4, 0),
640 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
641 "types": TYPE_FI,
642 },
643 "unstack": {
644 "operands": (1, 0),
645 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
646 "types": TYPE_F,
647 },
TatWai Chongf7008da2022-09-09 09:35:40 +0000648 "mirrorpad": {
649 "operands": (1, 0),
650 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
651 "types": TYPE_FI,
652 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000653 "pad": {
654 "operands": (1, 0),
655 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
TatWai Chong2226f902023-02-22 18:38:01 -0800656 "types": {
657 "tf": TYPE_F,
658 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
659 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000660 },
661 "expand_dims": {
662 "operands": (1, 0),
663 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
664 "types": TYPE_FI,
665 },
666 "shape": {
667 "operands": (1, 0),
668 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
669 "types": TYPE_FI,
670 },
671 "rank": {
672 "operands": (1, 0),
673 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
674 "types": TYPE_FI,
675 },
676 "fill": {
677 "operands": (1, 0),
678 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
679 "types": TYPE_FI,
680 },
681 "elu": {
682 "operands": (1, 0),
683 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
684 "types": TYPE_F,
685 },
686 "softmax": {
687 "operands": (1, 0),
688 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
689 "types": {
690 "tf": TYPE_F,
691 "tflite": list(
692 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
693 ),
694 },
695 },
696 "log_softmax": {
697 "operands": (1, 0),
698 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
699 "types": TYPE_F,
700 },
701 "matmul": {
702 "operands": (2, 0),
703 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
704 "types": {
705 "tf": TYPE_F,
706 "tflite": list(
707 TYPE_F
708 + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
710 ),
711 },
712 },
713 "add_scalar": {
714 "operands": (1, 0),
715 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
716 "types": TYPE_F,
717 },
718 "add_1d": {
719 "operands": (2, 0),
720 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
721 "types": TYPE_F,
722 },
723 "split": {
724 "operands": (1, 0),
725 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
726 "types": TYPE_FI,
727 },
728 "tile": {
729 "operands": (1, 0),
730 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
731 "types": TYPE_FI,
732 },
733 "reverse": {
734 "operands": (1, 0),
735 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
736 "types": {"tf": TYPE_FI},
737 },
738 "gather": {
739 "operands": (1, 0),
740 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
741 "types": TYPE_FI,
742 },
743 "gather_nd": {
744 "operands": (1, 0),
745 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
746 "types": TYPE_FI,
747 },
748 "scatter_nd": {
749 "operands": (1, 0),
750 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
751 "types": TYPE_FI,
752 },
753 "space_to_batch": {
754 "operands": (1, 0),
755 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
756 "types": TYPE_F,
757 },
758 "batch_to_space": {
759 "operands": (1, 0),
760 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
761 "types": TYPE_F,
762 },
763 "space_to_depth": {
764 "operands": (1, 0),
765 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
766 "types": TYPE_F,
767 },
768 "depth_to_space": {
769 "operands": (1, 0),
770 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
771 "types": TYPE_F,
772 },
773 "one_hot": {
774 "operands": (3, 1),
775 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
776 "types": TYPE_FI,
777 },
778 "fakequant": {
779 "operands": (1, 0),
780 "build_fcn": (
781 TBuilder.Fakequant,
782 TGen.tgBasic,
783 ArgGen.agFakequant,
784 ),
785 "types": {"tf": TYPE_F},
786 },
    "resize": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
        "custom_shapes": {
            "custom_shape_only": False,
            "shape_list": [(3, 1, 1, 7)],
        },
    },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000801 "left_shift": {
802 "operands": (1, 0),
803 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
804 "types": {"tf": [tf.int32]},
805 },
806 "right_shift": {
807 "operands": (1, 0),
808 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
809 "types": {
810 "tf": [
811 tf.int32,
812 ]
813 },
814 },
Jerry Ge9e94af82022-10-27 09:57:00 -0700815 "while": {
816 "operands": (1, 0),
817 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
818 "types": {
819 "tflite": list(TYPE_F),
820 },
821 },
822 "lstm": {
823 "operands": (1, 0),
824 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
825 "types": {
826 "tflite": [
827 tf.float32,
828 # tf.int32
829 ]
830 },
831 },
832 "gru": {
833 "operands": (1, 0),
834 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
835 "types": {
836 "tflite": [
837 tf.float32,
838 # tf.int32
839 ]
840 },
841 },
842 "rnn": {
843 "operands": (1, 0),
844 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
845 "types": {
846 "tflite": [
847 tf.float32,
848 ]
849 },
850 },
Luke Hutton261b7b62023-01-10 14:50:31 +0000851 "rfft2d": {
852 "operands": (1, 0),
853 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
854 "types": {
855 "tflite": TYPE_F,
856 },
857 },
Luke Hutton714aa602023-02-08 19:45:26 +0000858 "real": {
859 "operands": (1, 0),
860 "build_fcn": (TBuilder.Real, TGen.tgComplexComponents, ArgGen.agNone),
861 "types": {
862 "tflite": [tf.complex64],
863 },
864 },
865 "imag": {
866 "operands": (1, 0),
867 "build_fcn": (TBuilder.Imag, TGen.tgComplexComponents, ArgGen.agNone),
868 "types": {
869 "tflite": [tf.complex64],
870 },
871 },
Tai Lyfe36fa92023-06-01 21:45:12 +0000872 "broadcastto": {
873 "operands": (1, 1),
874 "build_fcn": (TBuilder.BroadcastTo, TGen.tgBroadcastTo, ArgGen.agNone),
875 "types": {
876 "tf": TYPE_FIB,
877 },
878 },
}

# Shapes to be tested; the default list can be overwritten by gen_rand_shapes()
shape_list = [
    (1,),
    (64,),
    (14, 19),
    (13, 21, 3),
    (1, 8, 16),
    (1, 4, 4, 4),
    (1, 8, 4, 17),
    (1, 4, 8, 19),
    (1, 32, 32, 8),
    (1, 7, 7, 9),
    (3, 1, 1, 7),
    (2, 2, 7, 7, 2),
    (1, 4, 8, 21, 17),
    (3, 32, 16, 16, 5),
]
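# Note: an operator only receives the shapes from this list whose rank falls
# within its optional "rank" range; build_const_net() defaults that range to
# (1, 4).  For example, the 5D shapes above are only exercised by entries that
# widen the range explicitly, such as:
#
#   "conv3d_TEMPLATE": {..., "rank": (1, 5)},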
898
899
900def gen_rand_shapes(args):
901 """Overwrite the global shape list with a new list of random shapes"""
902 global shape_list
903
904 rng = np.random.default_rng(args.random_seed)
905
906 # Don't let things get too big... cap the maximum volume, but let
907 # an individual dimension be 1..47
908 max_total_volume = 32 * 32 * 4
909
910 shape_list = []
    # Only iterate over ranks 2, 3, 4, and 5
    for rank in range(2, 6):
Jeremy Johnson015c3552022-02-23 12:15:03 +0000913 for n in range(args.random_shapes):
914 new_shape = rng.integers(1, 48, size=rank)
915
            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1
919
            # Limit the total shape volume and throw out any
            # shape where a dimension has been reduced below 1
922 volume = 1
923 skip_shape = False
924 for i in range(rank):
925
926 volume *= new_shape[i]
927
928 # Reduce the shape, while it's larger than the maximum volume
929 while volume > max_total_volume:
930 new_shape[i] = new_shape[i] // 2
931 volume = volume // 2
932
933 # Now an untenable dimension size? Skip this one.
934 if new_shape[i] < 1:
935 skip_shape = True
936
937 if not skip_shape:
938 shape_list.append(tuple(new_shape))
939
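# Worked example of the volume cap above (illustrative values, not real output):
# a random 3D shape [40, 40, 3] accumulates volume 40 -> 1600 -> 4800; 4800
# exceeds max_total_volume (4096), so the last dimension is halved (3 // 2 = 1)
# and the shape is kept as (40, 40, 1).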
940
# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to .tflite if it is a quantized unit test
943def run_unit_test(
944 op_name,
945 args,
946 test_dir,
947 curr_shape,
948 addl_args,
949 dtype,
950 excluded_framework_list,
951 quantized_inference_dtype,
952 result_name,
953 seed,
954):
955
956 try:
957 op = TF_OP_LIST[op_name]
958 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
959
960 # Get and seed a random number generator for this test
961 rng = np.random.default_rng(seed)
962
        # returns placeholders=[(str: name, np.array: value), ...]
        #         consts=[(str: name, np.array: value), ...]
        placeholders, consts = (
            tensor_gen_fcn(op, curr_shape, dtype, rng, False)
            if tensor_gen_fcn.__name__ == "tgBFuzz"
            else tensor_gen_fcn(op, curr_shape, dtype, rng)
        )

        # If the test doesn't have any placeholders/consts, terminate early
        if len(placeholders) == 0 and len(consts) == 0:
            return True
974
975 if not args.quiet:
976 print(" {} ".format(test_dir))
977
978 try:
979 os.mkdir(test_dir)
980 except FileExistsError:
981 pass
982
983 const_nodes = [value for name, value in consts]
984
985 num_placeholders = len(placeholders)
        # If the test is quantized, create tensor quantization metadata for
        # each input tensor, based on the quantized type
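        # Summary of the per-QuantType settings chosen below (derived from the
        # branches that follow):
        #
        #   QuantType.ALL_U8      -> np.uint8 inputs,  zero point 128, tf.uint8 inference
        #   QuantType.ALL_I8      -> np.int8 inputs,   zero point 0,   tf.int8 inference
        #   QuantType.ALL_I16     -> np.int16 inputs,  zero point 0,   tf.int16 inference
        #   QuantType.CONV_U8_U8  -> uint8 ifm/weights (int32 bias),   tf.uint8 inference
        #   QuantType.CONV_I8_I8  -> int8 ifm/weights (int32 bias),    tf.int8 inference
        #   QuantType.CONV_I16_I8 -> int16 ifm, int8 weights, int64 bias (40-bit
        #                            accumulator), tf.int16 inference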
988 if quantized_inference_dtype:
989 is_quantized = True
990 # TODO: support INT8 IFM x INT4 weight later
991 if quantized_inference_dtype == QuantType.ALL_U8:
992 qzero = [128] * num_placeholders
993 numpy_dtype = [np.uint8] * num_placeholders
994 tflite_inference_dtype = tf.uint8
995 elif quantized_inference_dtype == QuantType.ALL_I8:
996 qzero = [0] * num_placeholders
997 numpy_dtype = [np.int8] * num_placeholders
998 tflite_inference_dtype = tf.int8
999 elif quantized_inference_dtype == QuantType.ALL_I16:
1000 qzero = [0] * num_placeholders
1001 numpy_dtype = [np.int16] * num_placeholders
1002 tflite_inference_dtype = tf.int16
1003 elif quantized_inference_dtype == QuantType.CONV_U8_U8:
1004 assert (
1005 num_placeholders == 1
1006 ), "Unsupported number of placeholders for Convolution: {}".format(
1007 num_placeholders
1008 )
1009 qzero = [128] * num_placeholders
1010 if num_placeholders == 2:
1011 numpy_dtype = [np.uint8, np.uint8]
1012 else:
1013 numpy_dtype = [np.uint8, np.uint8, np.int32]
1014 tflite_inference_dtype = tf.uint8
1015 elif quantized_inference_dtype == QuantType.CONV_I8_I8:
1016 assert (
1017 num_placeholders == 1
1018 ), "Unsupported number of placeholders for Convolution: {}".format(
1019 num_placeholders
1020 )
1021 qzero = [0] * num_placeholders
1022 if num_placeholders == 2:
1023 numpy_dtype = [np.int8, np.int8]
1024 else:
1025 numpy_dtype = [np.int8, np.int8, np.int32]
1026 tflite_inference_dtype = tf.int8
1027 elif quantized_inference_dtype == QuantType.CONV_I16_I8:
1028 assert (
1029 num_placeholders == 1
1030 ), "Unsupported number of placeholders for Convolution: {}".format(
1031 num_placeholders
1032 )
1033 if num_placeholders == 2:
1034 qzero = [0, 0]
1035 numpy_dtype = [np.int16, np.int8]
1036 else:
1037 qzero = [0, 0, 0]
1038 numpy_dtype = [
1039 np.int16,
1040 np.int8,
1041 np.int64,
1042 ] # np.int64 to represent 40 bits accumulator
1043 tflite_inference_dtype = tf.int16
1044 else:
1045 raise Exception(
1046 "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
1047 )
1048
1049 else:
1050 is_quantized = False
1051
1052 tf_model_filename = None
1053 tf_result_npy_filename = None
1054 tf_result_name = None
1055
1056 tflite_model_filename = None
1057 tflite_result_npy_filename = None
1058 tflite_result_name = None
1059
1060 placeholder_names = []
1061 placeholder_vals = []
1062 placeholder_signatures = ()
1063 placeholder_npy_filenames = []
1064 placeholder_shapes = []
1065
1066 for idx, (name, val) in enumerate(placeholders):
1067 placeholder_names.append(name)
1068 placeholder_signatures = placeholder_signatures + (
1069 tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
1070 )
1071 placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
1072 placeholder_shapes.append(val.shape)
1073
1074 # Get test builder class
1075 fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
1076 concrete_function = tf.function(input_signature=placeholder_signatures)(
1077 fcn_node.eval
1078 ).get_concrete_function()
1079
1080 if is_quantized:
1081
1082 assert dtype is tf.float32, "quantized test must come from float32 graph"
1083
1084 # 1. Quantize float placeholder npy to quantized to feed the graph
1085 for idx, (name, val) in enumerate(placeholders):
1086
1087 # we use np.amin()/np.amax() to determine dynamic range
1088 # for quantized test
1089 zeropoint = 0
1090 scale = 1.0
1091 if numpy_dtype[idx] != np.int64:
1092 qmin = np.iinfo(numpy_dtype[idx]).min
1093 qmax = np.iinfo(numpy_dtype[idx]).max
1094 num_bits = np.iinfo(numpy_dtype[idx]).bits
1095 # 40 bit is represented as np.int64
1096 else:
1097 num_bits = 40
1098 qmin = -(1 << num_bits)
1099 qmax = (1 << num_bits) - 1
1100
1101 min_val = np.amin(val)
1102 max_val = np.amax(val)
1103
                # For a single-value tensor, we set the scale equal to abs(value)
                # and pin the zero point to qzero[idx] (128 for uint8):
                #   if val > 0, it'll be represented as 129,
                #     where val = (129 - 128) * val
                #   if val < 0, it'll be represented as 127,
                #     where val = (127 - 128) * (-val)
                #   if val == 0, it'll be represented as 128, with range [-128.0, 128.0],
                #     and quantized 1 represents the value
                # The effective min/max are adjusted accordingly.
1113 if max_val == min_val:
1114 if max_val != 0:
1115 scale = abs(max_val)
1116 else:
1117 scale = 1.0
1118 min_val = float(qmin - qzero[idx]) * scale
1119 max_val = float(qmax - qzero[idx]) * scale
1120 else:
1121 scale = (max_val - min_val) / float(qmax - qmin)
                    if op_name == "squared_difference":
                        zeropoint = -int(round((-min_val) / scale)) + qmin
                    else:
                        zeropoint = int(round((-min_val) / scale)) + qmin
Jeremy Johnson015c3552022-02-23 12:15:03 +00001126
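                # Worked example (illustrative, not from a real test): an int8
                # placeholder spanning [-0.5, 1.5] gives
                #   scale     = (1.5 - (-0.5)) / 255 ~= 0.00784
                #   zeropoint = round(0.5 / 0.00784) + (-128) = 64 - 128 = -64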
                # Run the values through tf.fakequant first so that the
                # quantization error is aligned
1128 fakequant_val = tf.quantization.fake_quant_with_min_max_args(
1129 val,
1130 min=min_val,
1131 max=max_val,
1132 num_bits=num_bits,
1133 name="gen_quant_npy",
1134 )
1135
                quant_val = np.round(fakequant_val / scale) + zeropoint

                # A few unit tests after the May 2020 TF hash produce quantized
                # values that exceed the [qmin, qmax] range, so clip before saving
                saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])

                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]),
                    saved_val,
                    False,
                )
1147
1148 placeholder_vals.append(tf.convert_to_tensor(saved_val))
1149
1150 # 2. Convert the model to quantized TFLite flatbuffer
1151 module = tf.Module()
1152 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1153 [concrete_function], module
1154 )
1155 converter.optimizations = [tf.lite.Optimize.DEFAULT]
1156 converter.experimental_new_converter = True
1157
1158 # use MLIR-based post-quantizer
1159 converter.experimental_new_quantizer = True
1160
1161 flag = (
1162 tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 # noqa: E501
1163 )
1164 if tflite_inference_dtype == tf.int16:
1165 converter.target_spec.supported_ops = [flag]
1166
            # Generator function for integer quantization of TFLiteConverter,
            # which generates a few hundred input samples with the same order,
            # type, and shape as the inputs, to calibrate/estimate the range of
            # the floating-point inputs.
            # For broadcast fuzzing tests, fuzzing needs to be disabled;
            # otherwise it causes a mismatch of input tensor shapes.
            def input_stats():
                for i in range(0, args.num_samples):
                    placeholders, _ = (
                        tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng, True)
                        if tensor_gen_fcn.__name__ == "tgBFuzz"
                        else tensor_gen_fcn(op, placeholder_shapes[0], dtype, rng)
                    )
                    yield [s[1] for s in placeholders]
Jeremy Johnson015c3552022-02-23 12:15:03 +00001180
1181 converter.representative_dataset = input_stats
1182 converter.inference_input_type = tflite_inference_dtype
1183 converter.inference_output_type = tflite_inference_dtype
1184
1185 tflite_model = converter.convert()
1186
1187 tflite_model_filename = "model.tflite"
1188
1189 # Write out converted model to disk
1190 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1191 f.write(tflite_model)
1192
1193 else: # is_quantized is False
1194
            # 1. Save out the numpy array directly
1196 for idx, (name, val) in enumerate(placeholders):
1197 placeholder_vals.append(tf.convert_to_tensor(val))
Luke Hutton714aa602023-02-08 19:45:26 +00001198
                # Complex tensors are expected to be represented by a
                # single floating point tensor of shape [?, ..., ?, 2].
1201 if val.dtype == np.complex64:
1202 val_shape = val.shape + (2,)
1203 val = val.view(np.float32)
1204 val = val.reshape(val_shape)
1205
Jeremy Johnson015c3552022-02-23 12:15:03 +00001206 np.save(
1207 os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
1208 )
1209
            # 2.a Save out .pb if the framework list includes tensorflow
1211 if "tf" not in excluded_framework_list:
1212 # Write out graph as protobuf to disk
1213 tf_model_filename = "model.pb"
1214 tf.io.write_graph(
1215 concrete_function.graph, test_dir, tf_model_filename, True
1216 )
1217
            # 2.b Save out .tflite if the framework list includes tflite
1219 if "tflite" not in excluded_framework_list:
1220 # Convert the model to TFLite flatbuffer
1221 module = tf.Module()
1222 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1223 [concrete_function], module
1224 )
1225
1226 converter.experimental_new_converter = True
1227
                # Even for a non-quantized int32 test, this needs to be set to tf.float32
1229 converter.inference_input_type = tf.float32
1230 converter.inference_output_type = tf.float32
1231 tflite_model = converter.convert()
1232
1233 # Write out converted model to disk
1234 tflite_model_filename = "model.tflite"
1235 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1236 f.write(tflite_model)
1237
1238 # Get TF reference result if .pb is specified
1239 if tf_model_filename:
1240 tf_result_npy_filename = "tf_result.npy"
1241 tf_result = concrete_function(*placeholder_vals)
1242 np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)
1243
1244 tf_result_name = result_name
1245
1246 # Get TFLite inference result if .tflite is specified
1247 if tflite_model_filename:
1248 tflite_result_npy_filename = "tflite_result.npy"
1249
            ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]
1252 if args.tflite_kernel_mode == "optimized" or (
1253 op_name in ops_with_optimized_only_kernel
1254 ):
1255 interpreter = tf.lite.Interpreter(
1256 model_path=os.path.join(test_dir, tflite_model_filename)
1257 )
1258 elif args.tflite_kernel_mode == "reference":
1259 interpreter = tf.lite.Interpreter(
1260 model_path=os.path.join(test_dir, tflite_model_filename),
1261 experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
1262 )
1263 else:
1264 assert 0, "unknown tflite interpreter mode {}".format(
1265 args.tflite_kernel_mode
1266 )
1267 interpreter.allocate_tensors()
1268
1269 input_details = interpreter.get_input_details()
1270 output_details = interpreter.get_output_details()
1271
1272 assert len(input_details) == len(
1273 placeholder_vals
1274 ), "number of placeholder mismatch"
1275
1276 for idx, val in enumerate(placeholder_vals):
1277 interpreter.set_tensor(input_details[idx]["index"], val.numpy())
1278
1279 interpreter.invoke()
1280 tflite_result = interpreter.get_tensor(output_details[0]["index"])
1281
1282 np.save(
1283 os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
1284 )
1285
            # The result tensor name changes after conversion to a TFLite
            # flatbuffer, so overwrite it with the information read from the
            # TFLite model directly.  Assume a single result tensor for now.
1289 tflite_result_name = output_details[0]["name"]
1290
Eric Kunze97b00272023-07-20 10:52:56 -07001291 _, test_name = os.path.split(test_dir)
1292
Jeremy Johnson015c3552022-02-23 12:15:03 +00001293 # Write out test descriptor
1294 write_test_json(
1295 filename=os.path.join(test_dir, "test.json"),
1296 tf_model_filename=tf_model_filename,
1297 tf_result_npy_filename=tf_result_npy_filename,
1298 tf_result_name=tf_result_name,
1299 tflite_model_filename=tflite_model_filename,
1300 tflite_result_npy_filename=tflite_result_npy_filename,
1301 tflite_result_name=tflite_result_name,
1302 ifm_name=placeholder_names,
1303 ifm_file=placeholder_npy_filenames,
1304 ifm_shape=placeholder_shapes,
1305 framework_exclusions=excluded_framework_list,
1306 quantized=is_quantized,
Eric Kunze97b00272023-07-20 10:52:56 -07001307 test_name=test_name,
Jeremy Johnson015c3552022-02-23 12:15:03 +00001308 )
1309 except Exception as e:
1310 msg = "Error running task: {}".format(e)
1311 print(msg)
1312 print(
1313 "".join(
1314 traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
1315 )
1316 )
1317 return False
1318 return True
1319
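# For illustration only: run_unit_test() is normally driven from main() via the
# argument lists that build_const_net() assembles below, but a single test can
# be sketched directly as follows (the directory name and addl_args shown are
# assumptions, not taken from a real generated test):
#
#   ok = run_unit_test(
#       "add",                   # op_name, a key of TF_OP_LIST
#       args,                    # parsed command-line arguments
#       "test_add_13x21x3_f32",  # test output directory
#       (13, 21, 3),             # curr_shape
#       [],                      # addl_args from the op's argument generator
#       tf.float32,              # dtype
#       [],                      # excluded_framework_list
#       None,                    # quantized_inference_dtype (None = float test)
#       "result",                # result_name
#       42,                      # seed
#   )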
1320
1321def build_const_net(
1322 args,
1323 curr_shape,
1324 op_name,
1325 dtype,
1326 excluded_framework_list,
1327 quantized_inference_dtype,
1328 result_name,
1329 seed,
1330 rng,
1331 filter,
1332 unit_test_args,
1333):
1334
1335 if quantized_inference_dtype:
1336 quant_dtype = get_tf_dtype(quantized_inference_dtype)
1337 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
1338 else:
1339 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
1340 test_dir = os.path.join(args.output_dir, test_dir)
1341
1342 # If the operator has an additional function to generate arguments, call it
1343 # here and iterate through the argument list that it generates
1344 op = TF_OP_LIST[op_name]
1345 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1346
TatWai Chongfd629052022-07-25 04:01:58 +00001347 try:
1348 rank_lo, rank_hi = op["rank"]
1349 except KeyError:
1350 # Set testing rank to (1, 4) in default.
1351 rank_lo = 1
1352 rank_hi = 4
1353
1354 if len(curr_shape) not in range(rank_lo, rank_hi + 1):
1355 return
1356
Jeremy Johnson015c3552022-02-23 12:15:03 +00001357 addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
1358 for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
1360 _, test_name = os.path.split(test_dir + desc)
1361 if not filter or filter.search(test_name):
Jeremy Johnson015c3552022-02-23 12:15:03 +00001362 unit_test_args.append(
1363 [
1364 op_name,
1365 args,
1366 test_dir + desc,
1367 curr_shape,
1368 addl_args,
1369 dtype,
1370 excluded_framework_list,
1371 quantized_inference_dtype,
1372 result_name,
1373 seed,
1374 ]
1375 )
1376
1377
# Python's built-in hash() is not reproducible across runs, so create our own
# hash for this purpose
1379def op_name_hash(op_name):
1380 result = 0xDEADBEEF
1381 for ch in op_name:
1382 if result & 1:
1383 result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
1384 else:
1385 result = (ord(ch) << 24) ^ (result >> 1)
1386
1387 return result
1388
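# For example, generate_op_tests() below derives a per-operator RNG seed from
# this hash; unlike Python's salted hash(), the value is stable across runs:
#
#   seed = (args.random_seed + op_name_hash("add")) % np.iinfo(np.int32).max
#   rng = np.random.default_rng(seed)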
1389
1390def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):
1391
1392 if not args.quiet:
1393 print(
1394 "Generating tests for {} ".format(
1395 op_name
1396 )
1397 )
1398
1399 op = TF_OP_LIST[op_name]
1400
1401 # Seed the RNG so that we get the same random tests for each test each time
1402 # If the number of tests for a given generation function changes, the tests
1403 # for that operator may also change accordingly, but this will at least keep
1404 # down churn across operators.
1405
1406 bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
1407 np.int32
1408 ).max
1409 rng = np.random.default_rng(bounded_hash_val)
1410
    # op["types"] is either a list of data types or a dictionary with "tf" and
    # "tflite" as keys, whose values are the data types to test under each framework
1413
1414 if isinstance(op["types"], dict):
1415 try:
1416 tf_dtypes = op["types"]["tf"]
1417 except KeyError:
1418 tf_dtypes = []
1419 try:
1420 tflite_dtypes = op["types"]["tflite"]
1421 except KeyError:
1422 tflite_dtypes = []
1423 elif isinstance(op["types"], list):
1424 tf_dtypes = op["types"]
1425 tflite_dtypes = op["types"]
1426
1427 tf_nonquantized_dtypes = tf_dtypes # tf doesn't support quantized data types
1428 tflite_quantized_dtypes = []
1429 tflite_nonquantized_dtypes = []
1430 for dtype in tflite_dtypes:
1431 if isinstance(dtype, QuantType):
1432 tflite_quantized_dtypes.append(dtype)
1433 else:
1434 tflite_nonquantized_dtypes.append(dtype)
1435
1436 nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
1437 set(tflite_nonquantized_dtypes)
1438 )
1439 nonquantized_dtypes = list(nonquantized_dtypes_set)
1440 quantized_dtypes = tflite_quantized_dtypes
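    # Worked example: for "add", the tflite list is
    #   TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
    # so quantized_dtypes ends up as [ALL_U8, ALL_I8, ALL_I16] and
    # nonquantized_dtypes as {tf.float32, tf.int32} (shared with "tf").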
1441
    # Append custom_shapes to shape_list, or replace shape_list entirely with
    # custom_shapes when custom_shape_only is set
    try:
        custom_shapes = op["custom_shapes"]
        if custom_shapes["custom_shape_only"]:
            shape_list = custom_shapes["shape_list"]
        else:
            shape_list = shape_list.copy()
            # Extend (not append) so each custom shape becomes its own entry
            shape_list.extend(custom_shapes["shape_list"])
    except KeyError:
        pass
1452
    # Populate non-quantized unit test arguments
1454 for dtype in nonquantized_dtypes:
1455
1456 excluded_framework_set = set(ALL_FRAMEWORKS)
1457 if dtype in tf_nonquantized_dtypes:
1458 excluded_framework_set.remove("tf")
1459 if dtype in tflite_nonquantized_dtypes:
1460 excluded_framework_set.remove("tflite")
1461 excluded_framework_list = list(excluded_framework_set)
1462
1463 for curr_shape in shape_list:
1464 build_const_net(
1465 args,
1466 curr_shape,
1467 op_name,
1468 dtype,
1469 excluded_framework_list,
1470 None,
1471 result_name,
1472 bounded_hash_val,
1473 rng,
1474 filter,
1475 unit_test_args,
1476 )
1477
    # Populate quantized unit test arguments;
    # 'tf' must be excluded and the source dtype is tf.float32
1480 for dtype in quantized_dtypes:
1481 for curr_shape in shape_list:
1482 build_const_net(
1483 args,
1484 curr_shape,
1485 op_name,
1486 tf.float32,
1487 ["tf"],
1488 dtype,
1489 result_name,
1490 bounded_hash_val,
1491 rng,
1492 filter,
1493 unit_test_args,
1494 )
1495
1496 return unit_test_args
1497
1498
1499def createDynamicOpLists():
1500 """The templated operators are conv2d-style operators with a number of kernel
1501 sizes. Since the operator is unchanged, we generate the range of kernel
1502 sizes here in this loop and remove the original templates from the list.
1503
1504 This could be expanded to non-conv2d-style operators in the future."""
1505
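    # For example, "conv2d_TEMPLATE" is expanded into "conv2d_1x1", "conv2d_3x3"
    # and "conv2d_5x5"; each copy records its kernel and clears the template
    # flag, roughly:
    #
    #   TF_OP_LIST["conv2d_3x3"] = TF_OP_LIST["conv2d_TEMPLATE"].copy()
    #   TF_OP_LIST["conv2d_3x3"]["filter"] = [3, 3]
    #   TF_OP_LIST["conv2d_3x3"]["template"] = False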
1506 # Dynamically create op lists for convolutions with a list of kernel sizes
1507 KERNELS = [
1508 [1, 1],
1509 [3, 3],
1510 [5, 5],
1511 ]
1512
TatWai Chongfd629052022-07-25 04:01:58 +00001513 # dim = [D, H, W]
1514 KERNELS_3D = [
1515 [1, 1, 1],
1516 [2, 3, 3],
1517 [3, 5, 5],
1518 ]
1519
Jeremy Johnson015c3552022-02-23 12:15:03 +00001520 TEMPLATE_LIST = [
1521 "conv2d",
1522 "conv2d_bias",
1523 "conv2d_relu",
1524 "conv2d_relu6",
1525 "conv2d_relu_n1_to_1",
1526 "conv2d_tanh",
1527 "depthwise_conv2d",
1528 "depthwise_conv2d_bias",
1529 "transpose_conv2d",
1530 ]
1531
TatWai Chongfd629052022-07-25 04:01:58 +00001532 TEMPLATE_LIST_CONV3D = [
1533 "conv3d",
1534 "conv3d_bias",
1535 ]
1536
Jeremy Johnson015c3552022-02-23 12:15:03 +00001537 for t in TEMPLATE_LIST:
1538 for k in KERNELS:
1539 testName = "{}_{}x{}".format(t, k[0], k[1])
1540 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1541 TF_OP_LIST[testName]["filter"] = k
1542 TF_OP_LIST[testName]["template"] = False
1543
    # The existing 2D templates don't support kernels with more than two
    # dimensions, so the 3D convolutions are expanded from their own list
1545 for t in TEMPLATE_LIST_CONV3D:
1546 for k in KERNELS_3D:
1547 testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
1548 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1549 TF_OP_LIST[testName]["filter"] = k
1550 TF_OP_LIST[testName]["template"] = False
1551
Jeremy Johnson015c3552022-02-23 12:15:03 +00001552 # Delete any templates after having created any dynamic ops
1553 # This is a two-pass operation because it's bad practice to delete
1554 # keys from dictionaries while iterating
1555 keyList = []
1556 for k in TF_OP_LIST:
1557 try:
1558 if TF_OP_LIST[k]["template"]:
1559 keyList.append(k)
1560 continue
1561 except KeyError:
1562 pass
1563
1564 for k in keyList:
1565 del TF_OP_LIST[k]
1566
1567
1568def main():
1569 parser = argparse.ArgumentParser()
1570 parser.add_argument(
1571 "--seed", dest="random_seed", default=42, type=int, help="Random seed"
1572 )
1573 parser.add_argument(
1574 "--random-shapes",
1575 dest="random_shapes",
1576 default=0,
1577 type=int,
1578 help=(
1579 "Use N random shapes of each rank for generating tests,"
1580 "seeded with random seed"
1581 ),
1582 )
1583 parser.add_argument(
1584 "-o",
1585 "--output-dir",
1586 dest="output_dir",
1587 default=".",
1588 type=str,
1589 help="Test output directory path prefix",
1590 )
1591 parser.add_argument(
1592 "-q",
1593 "--quiet",
1594 dest="quiet",
1595 default=False,
1596 action="store_true",
1597 help="Do not print test names",
1598 )
1599 parser.add_argument(
1600 "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
1601 )
1602 parser.add_argument(
1603 "-m",
1604 "--tflite-kernel-mode",
1605 dest="tflite_kernel_mode",
1606 type=str,
1607 choices=["reference", "optimized"],
1608 default="reference",
1609 help="TFLite interpreter kernel mode",
1610 )
1611 parser.add_argument(
1612 "--num-samples",
1613 dest="num_samples",
1614 default=200,
1615 type=int,
1616 help="Number of input samples for post-training quantization",
1617 )
1618 parser.add_argument(
1619 "--filter",
1620 dest="filter",
1621 default="",
1622 type=str,
1623 help="Filter test names by this expression",
1624 )
1625 args = parser.parse_args()
1626
1627 # Turn the filter into a re object if present
1628 filter = None
1629 if args.filter != "":
1630 filter = re.compile(args.filter)
1631
1632 # Autodetect CPU count
1633 if args.jobs <= 0:
1634 args.jobs = os.cpu_count()
1635
1636 # Disable TF info messages
1637 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
1638
1639 try:
1640 os.makedirs(args.output_dir)
1641 except FileExistsError:
1642 pass
1643
1644 if args.random_shapes:
1645 gen_rand_shapes(args)
1646
1647 # Build dynamic ops
1648 createDynamicOpLists()
1649
1650 # Generate the test list and arguments to run_unit_test()
1651 unit_test_args = []
1652
1653 for op in TF_OP_LIST:
1654 generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)
1655
1656 errors = 0
1657 for t in unit_test_args:
1658 if not run_unit_test(*t):
1659 errors = errors + 1
1660
1661 if not args.quiet:
1662 print("\nAll tasks done - with {} errors".format(errors))
1663
1664 return 1 if errors else 0
1665
1666
1667if __name__ == "__main__":
1668 exit(main())
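# Example invocation (the script filename below is an assumption; substitute the
# name of this file as checked out):
#
#   python3 tf_unit_test_builder.py --output-dir ./frameworks_tests \
#       --filter "conv2d" --tflite-kernel-mode reference --num-samples 200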