#!/usr/bin/env python3
# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# ------|------------------|------------------------------------
#  0    | DEBUG            | [Default] Print all messages
#  1    | INFO             | Filter out INFO messages
#  2    | WARNING          | Filter out INFO & WARNING messages
#  3    | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages, except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types': list of TensorFlow types that should be tested for this op
#               OR
#            a dictionary of {'framework_name': [type_list]} for cases where only
#            a subset of the types should be tested in each framework.  This can also
#            be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias':     boolean indicating that there is a bias component to be generated
#   'qtypes':   list of QuantType quantized types to generate for this op
#   'rank':     tuple (lowest rank, highest rank). Dimension range of the input tensor.

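# For illustration only (hypothetical op, not an entry in the table below),
# a minimal entry has the form:
#   "example_op": {
#       "operands": (1, 0),
#       "build_fcn": (TBuilder.ExampleOp, TGen.tgBasic, ArgGen.agNone),
#       "types": {"tf": TYPE_F, "tflite": TYPE_F},
#   },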
TF_OP_LIST = {
66 "add": {
67 "operands": (2, 0),
68 "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
69 "types": {
70 "tf": TYPE_FI,
71 "tflite": list(
72 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
73 ),
74 },
75 },
76 "sub": {
77 "operands": (2, 0),
78 "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
79 "types": {
80 "tf": TYPE_FI,
81 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
82 # QuantType.ALL_I16 fail in TFLite conversion
83 },
84 },
85 "mul": {
86 "operands": (2, 0),
87 "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
88 "types": {
89 "tf": TYPE_FI,
90 "tflite": list(
91 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
92 ),
93 },
94 },
95 "exp": {
96 "operands": (1, 0),
97 "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
98 "types": TYPE_F,
99 },
100 "rcp": {
101 "operands": (1, 0),
102 "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
103 "types": TYPE_F,
104 },
105 "relu": {
106 "operands": (1, 0),
107 "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
108 "types": {
109 "tf": TYPE_F,
110 "tflite": list(
111 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
112 ),
113 },
114 },
    "relu1": {
116 "operands": (1, 0),
117 "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
118 "types": {
119 "tf": [],
120 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
121 },
122 },
    "relu6": {
124 "operands": (1, 0),
125 "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
126 "types": {
127 "tf": TYPE_F,
128 "tflite": list(
129 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
130 ),
131 },
132 },
133 "leaky_relu": {
134 "operands": (1, 0),
135 "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
136 "types": {
137 "tf": TYPE_F,
138 "tflite": list(
139 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
140 ),
141 },
142 },
    "prelu": {
144 "operands": (1, 0),
145 "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
146 "types": {
147 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
148 },
149 },
    "gelu": {
151 "operands": (1, 0),
152 "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
153 "types": {
154 # Need compiler support for tf.Erf.
155 # "tf": TYPE_F,
156 "tflite": list(
157 # Only float32, int8 and uint8 supported currently
158 TYPE_F
159 + [QuantType.ALL_U8, QuantType.ALL_I8]
160 ),
161 },
162 },
    "concat": {
164 "operands": (2, 0),
165 "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
166 "types": TYPE_FI,
167 },
168 "bitwise_and": {
169 "operands": (2, 0),
170 "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
171 "types": {"tf": TYPE_I}, # Not supported in TF Lite
172 },
173 "bitwise_or": {
174 "operands": (2, 0),
175 "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
176 "types": {"tf": TYPE_I}, # Not supported in TF Lite
177 },
178 "bitwise_not": {
179 "operands": (1, 0),
180 "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
181 "types": {"tf": TYPE_I}, # Not supported in TF Lite
182 },
183 "bitwise_xor": {
184 "operands": (2, 0),
185 "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
186 "types": {"tf": TYPE_I}, # Not supported in TF Lite
187 },
188 "logical_and": {
189 "operands": (2, 0),
190 "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
191 "types": TYPE_B,
192 },
193 "logical_or": {
194 "operands": (2, 0),
195 "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
196 "types": TYPE_B,
197 },
198 "logical_not": {
199 "operands": (1, 0),
200 "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
201 "types": TYPE_B,
202 },
203 "reduce_any": {
204 "operands": (1, 0),
205 "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
206 "types": TYPE_B,
207 },
208 "reduce_all": {
209 "operands": (1, 0),
210 "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
211 "types": {"tf": TYPE_B},
212 },
213 "reduce_min": {
214 "operands": (1, 0),
215 "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
216 "types": {
217 "tf": TYPE_FI,
218 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
219 },
220 },
221 "reduce_max": {
222 "operands": (1, 0),
223 "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
224 "types": {
225 "tf": TYPE_FI,
226 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
227 },
228 },
229 "reduce_sum": {
230 "operands": (1, 0),
231 "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
232 "types": {
233 "tf": TYPE_F,
234 # v2 converter doesn't recognize quantized reduce_sum
235 # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
236 "tflite": TYPE_F,
237 },
238 },
239 "reduce_mean": {
240 "operands": (1, 0),
241 "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
242 "types": {
243 "tf": TYPE_F,
244 "tflite": list(
245 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
246 ),
247 },
248 },
249 "reduce_product": {
250 "operands": (1, 0),
251 "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
252 "types": TYPE_F,
253 },
254 "min": {
255 "operands": (2, 0),
256 "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
257 "types": TYPE_FI,
258 },
259 "max": {
260 "operands": (2, 0),
261 "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
262 "types": TYPE_FI,
263 },
264 "pow": {
265 "operands": (2, 0),
266 "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
267 # Technically, integer is supported, but only for positive exponents.
268 # Needs a random argument generator.
269 "types": TYPE_F,
270 },
271 "abs": {
272 "operands": (1, 0),
273 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
274 "types": TYPE_F,
275 },
276 "ceil": {
277 "operands": (1, 0),
278 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
279 "types": TYPE_F,
280 },
281 "floor": {
282 "operands": (1, 0),
283 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
284 "types": TYPE_F,
285 },
286 "log": {
287 "operands": (1, 0),
288 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
289 "types": TYPE_F,
290 },
291 "negate": {
292 "operands": (1, 0),
293 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
294 "types": TYPE_F,
295 },
296 "rsqrt": {
297 "operands": (1, 0),
298 "build_fcn": (TBuilder.Rsqrt, TGen.tgBasic, ArgGen.agNone),
299 "types": TYPE_F,
300 },
    "sign": {
302 "operands": (1, 0),
303 "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
304 "types": {
305 "tf": TYPE_F,
306 },
307 },
    "sigmoid": {
309 "operands": (1, 0),
310 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
311 "types": {
312 "tf": TYPE_F,
313 "tflite": list(
314 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
315 ),
316 },
317 },
318 "tanh": {
319 "operands": (1, 0),
320 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
321 "types": {
322 "tf": TYPE_F,
323 "tflite": list(
324 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
325 ),
326 },
327 },
    "sin": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "cos": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "square": {
339 "operands": (1, 0),
340 "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
341 "types": TYPE_F,
342 },
343 "squared_difference": {
344 "operands": (2, 0),
345 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
346 "types": TYPE_F,
347 },
348 "equal": {
349 "operands": (2, 0),
350 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
351 "types": TYPE_FI,
352 },
353 "greater_equal": {
354 "operands": (2, 0),
355 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
356 "types": TYPE_FI,
357 },
358 "greater": {
359 "operands": (2, 0),
360 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
361 "types": TYPE_FI,
362 },
363 "less": {
364 "operands": (2, 0),
365 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
366 "types": TYPE_FI,
367 },
368 "less_equal": {
369 "operands": (2, 0),
370 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
371 "types": TYPE_FI,
372 },
373 "conv2d_TEMPLATE": {
374 "operands": (1, 1),
375 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
376 "types": {
377 "tf": [tf.float32],
378 "tflite": [
379 tf.float32,
380 QuantType.CONV_U8_U8,
381 QuantType.CONV_I8_I8,
382 QuantType.CONV_I16_I8,
383 ],
384 },
385 "template": True,
386 },
387 "conv2d_relu_TEMPLATE": {
388 "operands": (1, 2),
389 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
390 "types": {
391 "tf": [tf.float32],
392 "tflite": [
393 tf.float32,
394 QuantType.CONV_U8_U8,
395 QuantType.CONV_I8_I8,
396 QuantType.CONV_I16_I8,
397 ],
398 },
399 "template": True,
400 },
401 "conv2d_relu6_TEMPLATE": {
402 "operands": (1, 2),
403 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
404 "types": {
405 "tf": [tf.float32],
406 "tflite": [
407 tf.float32,
408 QuantType.CONV_U8_U8,
409 QuantType.CONV_I8_I8,
410 QuantType.CONV_I16_I8,
411 ],
412 },
413 "template": True,
414 },
415 "conv2d_relu_n1_to_1_TEMPLATE": {
416 "operands": (1, 2),
417 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
418 "types": {
419 "tf": [tf.float32],
420 "tflite": [
421 tf.float32,
422 QuantType.CONV_U8_U8,
423 QuantType.CONV_I8_I8,
424 QuantType.CONV_I16_I8,
425 ],
426 },
427 "template": True,
428 },
429 # This test is converted as:
430 # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: is there any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
432 "conv2d_tanh_TEMPLATE": {
433 "operands": (1, 2),
434 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
435 "types": {
436 "tf": [tf.float32],
437 "tflite": [
438 tf.float32,
439 QuantType.CONV_U8_U8,
440 QuantType.CONV_I8_I8,
441 QuantType.CONV_I16_I8,
442 ],
443 },
444 "template": True,
445 },
446 "conv2d_bias_TEMPLATE": {
447 "operands": (1, 2),
448 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
449 "types": {
450 "tf": [tf.float32],
451 "tflite": [
452 tf.float32,
453 QuantType.CONV_U8_U8,
454 QuantType.CONV_I8_I8,
455 QuantType.CONV_I16_I8,
456 ],
457 },
458 "bias": True,
459 "template": True,
460 },
    "conv3d_TEMPLATE": {
462 "operands": (1, 1),
463 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
464 "types": {
465 "tf": [tf.float32],
466 "tflite": [
467 tf.float32,
468 QuantType.CONV_U8_U8,
469 QuantType.CONV_I8_I8,
470 # Quantization to 16x8-bit not yet supported by tflite.
471 ],
472 },
473 "template": True,
474 "rank": (1, 5),
475 },
476 "conv3d_bias_TEMPLATE": {
477 "operands": (1, 2),
478 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
479 "types": {
480 "tf": [tf.float32],
481 "tflite": [
482 tf.float32,
483 QuantType.CONV_U8_U8,
484 QuantType.CONV_I8_I8,
485 # Quantization to 16x8-bit not yet supported by tflite.
486 ],
487 },
488 "bias": True,
489 "template": True,
490 "rank": (1, 5),
491 },
    "depthwise_conv2d_TEMPLATE": {
493 "operands": (1, 1),
494 "build_fcn": (
495 TBuilder.DepthwiseConv2d,
496 TGen.tgDepthwiseConv2d,
497 ArgGen.agDepthwiseConv2d,
498 ),
499 "types": {
500 "tf": [tf.float32],
501 "tflite": [
502 tf.float32,
503 QuantType.CONV_U8_U8,
504 QuantType.CONV_I8_I8,
505 QuantType.CONV_I16_I8,
506 ],
507 },
508 "template": True,
509 },
510 "depthwise_conv2d_bias_TEMPLATE": {
511 "operands": (1, 2),
512 "build_fcn": (
513 TBuilder.DepthwiseConv2dWithBias,
514 TGen.tgDepthwiseConv2d,
515 ArgGen.agDepthwiseConv2d,
516 ),
517 "types": {
518 "tf": [tf.float32],
519 "tflite": [
520 tf.float32,
521 QuantType.CONV_U8_U8,
522 QuantType.CONV_I8_I8,
523 QuantType.CONV_I16_I8,
524 ],
525 },
526 "bias": True,
527 "template": True,
528 },
529 "transpose_conv2d_TEMPLATE": {
530 "operands": (1, 1),
531 "build_fcn": (
532 TBuilder.TransposeConv2d,
533 TGen.tgTransposeConv2d,
534 ArgGen.agTransposeConv2d,
535 ),
536 "types": {
537 "tf": [tf.float32],
538 "tflite": [
539 tf.float32,
540 QuantType.CONV_U8_U8,
541 QuantType.CONV_I8_I8,
542 QuantType.CONV_I16_I8,
543 ],
544 },
545 "template": True,
546 },
547 "argmax": {
548 "operands": (1, 0),
549 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
550 "types": {"tf": TYPE_F},
551 },
552 "avg_pool2d": {
553 "operands": (1, 0),
554 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
555 "types": {
556 "tf": TYPE_F,
557 "tflite": list(
558 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
559 ),
560 },
561 },
562 "max_pool2d": {
563 "operands": (1, 0),
564 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
565 "types": {
566 "tf": TYPE_F,
567 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
568 # ALL_I16 not supported yet
569 # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
570 # QI16 is missing from MaxPoolOperandAndResultConstraints
571 # If adding QI16 back this test can run through.
572 },
573 },
574 "reshape": {
575 "operands": (1, 0),
576 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
577 "types": TYPE_FI,
578 },
579 "transpose": {
580 "operands": (1, 0),
581 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
582 "types": TYPE_FI,
583 },
584 "slice": {
585 "operands": (1, 0),
586 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
587 "types": TYPE_FI,
588 },
589 "strided_slice": {
590 "operands": (1, 0),
591 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
592 "types": TYPE_FI,
593 },
594 "select": {
595 "operands": (3, 0),
596 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
597 "types": TYPE_FI,
598 },
599 "addn": {
600 "operands": (4, 0),
601 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
602 "types": TYPE_FI,
603 },
604 "concatv2": {
605 "operands": (4, 0),
606 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
607 "types": TYPE_FI,
608 },
609 "stack": {
610 "operands": (4, 0),
611 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
612 "types": TYPE_FI,
613 },
614 "unstack": {
615 "operands": (1, 0),
616 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
617 "types": TYPE_F,
618 },
    "mirrorpad": {
620 "operands": (1, 0),
621 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
622 "types": TYPE_FI,
623 },
    "pad": {
625 "operands": (1, 0),
626 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
627 "types": TYPE_F,
628 },
629 "expand_dims": {
630 "operands": (1, 0),
631 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
632 "types": TYPE_FI,
633 },
634 "shape": {
635 "operands": (1, 0),
636 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
637 "types": TYPE_FI,
638 },
639 "rank": {
640 "operands": (1, 0),
641 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
642 "types": TYPE_FI,
643 },
644 "fill": {
645 "operands": (1, 0),
646 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
647 "types": TYPE_FI,
648 },
649 "elu": {
650 "operands": (1, 0),
651 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
652 "types": TYPE_F,
653 },
654 "softmax": {
655 "operands": (1, 0),
656 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
657 "types": {
658 "tf": TYPE_F,
659 "tflite": list(
660 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
661 ),
662 },
663 },
664 "log_softmax": {
665 "operands": (1, 0),
666 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
667 "types": TYPE_F,
668 },
669 "matmul": {
670 "operands": (2, 0),
671 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
672 "types": {
673 "tf": TYPE_F,
674 "tflite": list(
675 TYPE_F
676 + [QuantType.ALL_U8, QuantType.ALL_I8]
677 # 16 bits matmul fail to convert
678 ),
679 },
680 },
681 "add_scalar": {
682 "operands": (1, 0),
683 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
684 "types": TYPE_F,
685 },
686 "add_1d": {
687 "operands": (2, 0),
688 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
689 "types": TYPE_F,
690 },
691 "split": {
692 "operands": (1, 0),
693 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
694 "types": TYPE_FI,
695 },
696 "tile": {
697 "operands": (1, 0),
698 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
699 "types": TYPE_FI,
700 },
701 "reverse": {
702 "operands": (1, 0),
703 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
704 "types": {"tf": TYPE_FI},
705 },
706 "gather": {
707 "operands": (1, 0),
708 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
709 "types": TYPE_FI,
710 },
711 "gather_nd": {
712 "operands": (1, 0),
713 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
714 "types": TYPE_FI,
715 },
716 "scatter_nd": {
717 "operands": (1, 0),
718 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
719 "types": TYPE_FI,
720 },
721 "space_to_batch": {
722 "operands": (1, 0),
723 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
724 "types": TYPE_F,
725 },
726 "batch_to_space": {
727 "operands": (1, 0),
728 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
729 "types": TYPE_F,
730 },
731 "space_to_depth": {
732 "operands": (1, 0),
733 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
734 "types": TYPE_F,
735 },
736 "depth_to_space": {
737 "operands": (1, 0),
738 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
739 "types": TYPE_F,
740 },
741 "one_hot": {
742 "operands": (3, 1),
743 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
744 "types": TYPE_FI,
745 },
746 "fakequant": {
747 "operands": (1, 0),
748 "build_fcn": (
749 TBuilder.Fakequant,
750 TGen.tgBasic,
751 ArgGen.agFakequant,
752 ),
753 "types": {"tf": TYPE_F},
754 },
755 "resize_nearest": {
756 "operands": (1, 0),
757 "build_fcn": (TBuilder.ResizeNearest, TGen.tgPooling, ArgGen.agNone),
758 "types": {
759 "tf": TYPE_F,
760 "tflite": list(
761 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
762 ),
763 },
764 },
765 "resize_bilinear": {
766 "operands": (1, 0),
767 "build_fcn": (TBuilder.ResizeBilinear, TGen.tgPooling, ArgGen.agNone),
768 "types": {
769 "tf": TYPE_F,
770 "tflite": list(
771 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
772 ),
773 },
774 },
    "resize_bilinear_v1_align_corners": {
776 "operands": (1, 0),
777 "build_fcn": (
778 TBuilder.ResizeBilinearV1AlignCorners,
779 TGen.tgPooling,
780 ArgGen.agNone,
781 ),
782 "types": {
783 "tf": TYPE_F,
784 "tflite": list(
785 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
786 ),
787 },
788 },
789 "resize_bilinear_v1_none": {
790 "operands": (1, 0),
791 "build_fcn": (TBuilder.ResizeBilinearV1None, TGen.tgPooling, ArgGen.agNone),
792 "types": {
793 "tf": TYPE_F,
794 "tflite": list(
795 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
796 ),
797 },
798 },
    "left_shift": {
800 "operands": (1, 0),
801 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
802 "types": {"tf": [tf.int32]},
803 },
804 "right_shift": {
805 "operands": (1, 0),
806 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
807 "types": {
808 "tf": [
809 tf.int32,
810 ]
811 },
812 },
    "while": {
814 "operands": (1, 0),
815 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
816 "types": {
817 "tflite": list(TYPE_F),
818 },
819 },
820 "lstm": {
821 "operands": (1, 0),
822 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
823 "types": {
824 "tflite": [
825 tf.float32,
826 # tf.int32
827 ]
828 },
829 },
830 "gru": {
831 "operands": (1, 0),
832 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
833 "types": {
834 "tflite": [
835 tf.float32,
836 # tf.int32
837 ]
838 },
839 },
840 "rnn": {
841 "operands": (1, 0),
842 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
843 "types": {
844 "tflite": [
845 tf.float32,
846 ]
847 },
848 },
    "rfft2d": {
850 "operands": (1, 0),
851 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
852 "types": {
853 "tflite": TYPE_F,
854 },
855 },
}
857
# Shapes to be tested; the default list can be overwritten with --random-shapes
shape_list = [
    (1,),
    (64,),
    (14, 19),
    (13, 21, 3),
    (1, 8, 16),
    (1, 4, 4, 4),
    (1, 8, 4, 17),
    (1, 4, 8, 19),
    (1, 32, 32, 8),
    (1, 7, 7, 9),
    (2, 2, 7, 7, 2),
    (1, 4, 8, 21, 17),
    (3, 32, 16, 16, 5),
]
874
875
876def gen_rand_shapes(args):
877 """Overwrite the global shape list with a new list of random shapes"""
878 global shape_list
879
880 rng = np.random.default_rng(args.random_seed)
881
882 # Don't let things get too big... cap the maximum volume, but let
883 # an individual dimension be 1..47
884 max_total_volume = 32 * 32 * 4
885
886 shape_list = []
    # Only iterate over ranks 2, 3, 4 and 5
    for rank in range(2, 6):
        for n in range(args.random_shapes):
            new_shape = rng.integers(1, 48, size=rank)

            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1
895
896 # Limit the total shape volume and throw out any
897 # shapes that wouldn't leave at least size=2 in some non-batch dimension
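            # For example, with max_total_volume = 4096 a drawn shape of (40, 40, 8)
            # would be reduced to (40, 40, 2) by halving the offending dimension.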
898 volume = 1
899 skip_shape = False
900 for i in range(rank):
901
902 volume *= new_shape[i]
903
904 # Reduce the shape, while it's larger than the maximum volume
905 while volume > max_total_volume:
906 new_shape[i] = new_shape[i] // 2
907 volume = volume // 2
908
909 # Now an untenable dimension size? Skip this one.
910 if new_shape[i] < 1:
911 skip_shape = True
912
913 if not skip_shape:
914 shape_list.append(tuple(new_shape))
915
916
# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to .tflite if it is a quantized unit test
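# On success the test directory contains, depending on the framework set:
#   model.pb and/or model.tflite        - serialized graph / flatbuffer
#   <placeholder>.npy                   - input tensors fed to the graph
#   tf_result.npy / tflite_result.npy   - reference results
#   test.json                           - test descriptor tying it all together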
919def run_unit_test(
920 op_name,
921 args,
922 test_dir,
923 curr_shape,
924 addl_args,
925 dtype,
926 excluded_framework_list,
927 quantized_inference_dtype,
928 result_name,
929 seed,
930):
931
932 try:
933 op = TF_OP_LIST[op_name]
934 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
935
936 # Get and seed a random number generator for this test
937 rng = np.random.default_rng(seed)
938
        # returns placeholders=[(str: name, np.array: value)]
        #         consts=[(str: name, np.array: value)]
941 placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)
942
        # if the test doesn't generate any placeholders or consts, terminate early
944 if len(placeholders) == 0 and len(consts) == 0:
945 return True
946
947 if not args.quiet:
948 print(" {} ".format(test_dir))
949
950 try:
951 os.mkdir(test_dir)
952 except FileExistsError:
953 pass
954
955 const_nodes = [value for name, value in consts]
956
957 num_placeholders = len(placeholders)
        # if the test is quantized, create tensor quantization metadata for
        # each input tensor, based on the quantized type
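        # Summary of the mapping below (zero point / placeholder dtypes / tflite inference dtype):
        #   ALL_U8      -> 128 / uint8                       / tf.uint8
        #   ALL_I8      -> 0   / int8                        / tf.int8
        #   ALL_I16     -> 0   / int16                       / tf.int16
        #   CONV_U8_U8  -> 128 / uint8 (+ int32 bias)        / tf.uint8
        #   CONV_I8_I8  -> 0   / int8 (+ int32 bias)         / tf.int8
        #   CONV_I16_I8 -> 0   / int16, int8 (+ int64 bias)  / tf.int16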
960 if quantized_inference_dtype:
961 is_quantized = True
962 # TODO: support INT8 IFM x INT4 weight later
963 if quantized_inference_dtype == QuantType.ALL_U8:
964 qzero = [128] * num_placeholders
965 numpy_dtype = [np.uint8] * num_placeholders
966 tflite_inference_dtype = tf.uint8
967 elif quantized_inference_dtype == QuantType.ALL_I8:
968 qzero = [0] * num_placeholders
969 numpy_dtype = [np.int8] * num_placeholders
970 tflite_inference_dtype = tf.int8
971 elif quantized_inference_dtype == QuantType.ALL_I16:
972 qzero = [0] * num_placeholders
973 numpy_dtype = [np.int16] * num_placeholders
974 tflite_inference_dtype = tf.int16
975 elif quantized_inference_dtype == QuantType.CONV_U8_U8:
976 assert (
977 num_placeholders == 1
978 ), "Unsupported number of placeholders for Convolution: {}".format(
979 num_placeholders
980 )
981 qzero = [128] * num_placeholders
982 if num_placeholders == 2:
983 numpy_dtype = [np.uint8, np.uint8]
984 else:
985 numpy_dtype = [np.uint8, np.uint8, np.int32]
986 tflite_inference_dtype = tf.uint8
987 elif quantized_inference_dtype == QuantType.CONV_I8_I8:
988 assert (
989 num_placeholders == 1
990 ), "Unsupported number of placeholders for Convolution: {}".format(
991 num_placeholders
992 )
993 qzero = [0] * num_placeholders
994 if num_placeholders == 2:
995 numpy_dtype = [np.int8, np.int8]
996 else:
997 numpy_dtype = [np.int8, np.int8, np.int32]
998 tflite_inference_dtype = tf.int8
999 elif quantized_inference_dtype == QuantType.CONV_I16_I8:
1000 assert (
1001 num_placeholders == 1
1002 ), "Unsupported number of placeholders for Convolution: {}".format(
1003 num_placeholders
1004 )
1005 if num_placeholders == 2:
1006 qzero = [0, 0]
1007 numpy_dtype = [np.int16, np.int8]
1008 else:
1009 qzero = [0, 0, 0]
1010 numpy_dtype = [
1011 np.int16,
1012 np.int8,
1013 np.int64,
1014 ] # np.int64 to represent 40 bits accumulator
1015 tflite_inference_dtype = tf.int16
1016 else:
1017 raise Exception(
1018 "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
1019 )
1020
1021 else:
1022 is_quantized = False
1023
1024 tf_model_filename = None
1025 tf_result_npy_filename = None
1026 tf_result_name = None
1027
1028 tflite_model_filename = None
1029 tflite_result_npy_filename = None
1030 tflite_result_name = None
1031
1032 placeholder_names = []
1033 placeholder_vals = []
1034 placeholder_signatures = ()
1035 placeholder_npy_filenames = []
1036 placeholder_shapes = []
1037
1038 for idx, (name, val) in enumerate(placeholders):
1039 placeholder_names.append(name)
1040 placeholder_signatures = placeholder_signatures + (
1041 tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
1042 )
1043 placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
1044 placeholder_shapes.append(val.shape)
1045
1046 # Get test builder class
1047 fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
1048 concrete_function = tf.function(input_signature=placeholder_signatures)(
1049 fcn_node.eval
1050 ).get_concrete_function()
1051
1052 if is_quantized:
1053
1054 assert dtype is tf.float32, "quantized test must come from float32 graph"
1055
            # 1. Quantize the float placeholder values to feed the quantized graph
1057 for idx, (name, val) in enumerate(placeholders):
1058
1059 # we use np.amin()/np.amax() to determine dynamic range
1060 # for quantized test
1061 zeropoint = 0
1062 scale = 1.0
1063 if numpy_dtype[idx] != np.int64:
1064 qmin = np.iinfo(numpy_dtype[idx]).min
1065 qmax = np.iinfo(numpy_dtype[idx]).max
1066 num_bits = np.iinfo(numpy_dtype[idx]).bits
1067 # 40 bit is represented as np.int64
1068 else:
1069 num_bits = 40
1070 qmin = -(1 << num_bits)
1071 qmax = (1 << num_bits) - 1
1072
1073 min_val = np.amin(val)
1074 max_val = np.amax(val)
1075
                # For a single-value tensor, we set scale equal to abs(value)
                # and fix the zeropoint to 128:
                #   if val > 0, it'll be represented as 129,
                #     where val = (129 - 128) * val
                #   if val < 0, it'll be represented as 127,
                #     where val = (127 - 128) * (-val)
                #   if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
                #     and quantized 1 represents the value
                # The effective min/max are adjusted accordingly.
1085 if max_val == min_val:
1086 if max_val != 0:
1087 scale = abs(max_val)
1088 else:
1089 scale = 1.0
1090 min_val = float(qmin - qzero[idx]) * scale
1091 max_val = float(qmax - qzero[idx]) * scale
1092 else:
1093 scale = (max_val - min_val) / float(qmax - qmin)
1094 zeropoint = int(round((-min_val) / scale)) + qmin
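                # Worked example (hypothetical values): an int8 input spanning
                # [-1.0, 3.0] gives scale = 4.0 / 255 ~= 0.0157 and
                # zeropoint = round(1.0 / 0.0157) + (-128) = -64.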
1095
                # run through tf.fakequant first to ensure the quantization error is aligned
1097 fakequant_val = tf.quantization.fake_quant_with_min_max_args(
1098 val,
1099 min=min_val,
1100 max=max_val,
1101 num_bits=num_bits,
1102 name="gen_quant_npy",
1103 )
1104
1105 quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint
1106
                # In a few unit tests (after the TF hash of May 2020) the quantized
                # value can exceed the [0, 255] range for some reason, so clip it
1109 saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])
1110
                # save all quantized tensors as np.int32,
                # since the TOSA numpy C++ API only supports int32
1113 np.save(
1114 os.path.join(test_dir, placeholder_npy_filenames[idx]),
1115 saved_val.astype(np.int32),
1116 False,
1117 )
1118
1119 placeholder_vals.append(tf.convert_to_tensor(saved_val))
1120
1121 # 2. Convert the model to quantized TFLite flatbuffer
1122 module = tf.Module()
1123 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1124 [concrete_function], module
1125 )
1126 converter.optimizations = [tf.lite.Optimize.DEFAULT]
1127 converter.experimental_new_converter = True
1128
1129 # use MLIR-based post-quantizer
1130 converter.experimental_new_quantizer = True
1131
1132 flag = (
1133 tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 # noqa: E501
1134 )
1135 if tflite_inference_dtype == tf.int16:
1136 converter.target_spec.supported_ops = [flag]
1137
1138 def input_stats():
1139 for i in range(0, args.num_samples):
1140 a = [
1141 TGen.getRand(shape, tf.float32, rng)
1142 for shape in placeholder_shapes
1143 ]
1144 yield a
1145
1146 converter.representative_dataset = input_stats
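            # The representative dataset drives post-training quantization: the
            # converter runs these float samples through the graph to choose the
            # scales and zero points baked into the quantized flatbuffer.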
1147 converter.inference_input_type = tflite_inference_dtype
1148 converter.inference_output_type = tflite_inference_dtype
1149
1150 tflite_model = converter.convert()
1151
1152 tflite_model_filename = "model.tflite"
1153
1154 # Write out converted model to disk
1155 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1156 f.write(tflite_model)
1157
1158 else: # is_quantized is False
1159
            # 1. Save the numpy arrays out directly
1161 for idx, (name, val) in enumerate(placeholders):
1162 placeholder_vals.append(tf.convert_to_tensor(val))
1163 np.save(
1164 os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
1165 )
1166
            # 2.a Save out .pb if the framework list includes tensorflow
1168 if "tf" not in excluded_framework_list:
1169 # Write out graph as protobuf to disk
1170 tf_model_filename = "model.pb"
1171 tf.io.write_graph(
1172 concrete_function.graph, test_dir, tf_model_filename, True
1173 )
1174
            # 2.b Save out .tflite if the framework list includes tflite
1176 if "tflite" not in excluded_framework_list:
1177 # Convert the model to TFLite flatbuffer
1178 module = tf.Module()
1179 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1180 [concrete_function], module
1181 )
1182
1183 converter.experimental_new_converter = True
1184
                # Even for a non-quantized int32 test, this needs to be set to tf.float32
1186 converter.inference_input_type = tf.float32
1187 converter.inference_output_type = tf.float32
1188 tflite_model = converter.convert()
1189
1190 # Write out converted model to disk
1191 tflite_model_filename = "model.tflite"
1192 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1193 f.write(tflite_model)
1194
1195 # Get TF reference result if .pb is specified
1196 if tf_model_filename:
1197 tf_result_npy_filename = "tf_result.npy"
1198 tf_result = concrete_function(*placeholder_vals)
1199 np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)
1200
1201 tf_result_name = result_name
1202
1203 # Get TFLite inference result if .tflite is specified
1204 if tflite_model_filename:
1205 tflite_result_npy_filename = "tflite_result.npy"
1206
1207 ops_with_optimized_only_kernel = ["elu", "ceil", "gather"]
1208
1209 if args.tflite_kernel_mode == "optimized" or (
1210 op_name in ops_with_optimized_only_kernel
1211 ):
1212 interpreter = tf.lite.Interpreter(
1213 model_path=os.path.join(test_dir, tflite_model_filename)
1214 )
1215 elif args.tflite_kernel_mode == "reference":
1216 interpreter = tf.lite.Interpreter(
1217 model_path=os.path.join(test_dir, tflite_model_filename),
1218 experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
1219 )
1220 else:
1221 assert 0, "unknown tflite interpreter mode {}".format(
1222 args.tflite_kernel_mode
1223 )
1224 interpreter.allocate_tensors()
1225
1226 input_details = interpreter.get_input_details()
1227 output_details = interpreter.get_output_details()
1228
1229 assert len(input_details) == len(
1230 placeholder_vals
1231 ), "number of placeholder mismatch"
1232
1233 for idx, val in enumerate(placeholder_vals):
1234 interpreter.set_tensor(input_details[idx]["index"], val.numpy())
1235
1236 interpreter.invoke()
1237 tflite_result = interpreter.get_tensor(output_details[0]["index"])
1238
1239 np.save(
1240 os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
1241 )
1242
            # The result tensor name may change after converting to a TFLite flatbuffer,
            # so overwrite the information from the TFLite model directly.
            # Assume a single result tensor for now
1246 tflite_result_name = output_details[0]["name"]
1247
1248 # Write out test descriptor
1249 write_test_json(
1250 filename=os.path.join(test_dir, "test.json"),
1251 tf_model_filename=tf_model_filename,
1252 tf_result_npy_filename=tf_result_npy_filename,
1253 tf_result_name=tf_result_name,
1254 tflite_model_filename=tflite_model_filename,
1255 tflite_result_npy_filename=tflite_result_npy_filename,
1256 tflite_result_name=tflite_result_name,
1257 ifm_name=placeholder_names,
1258 ifm_file=placeholder_npy_filenames,
1259 ifm_shape=placeholder_shapes,
1260 framework_exclusions=excluded_framework_list,
1261 quantized=is_quantized,
1262 )
1263 except Exception as e:
1264 msg = "Error running task: {}".format(e)
1265 print(msg)
1266 print(
1267 "".join(
1268 traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
1269 )
1270 )
1271 return False
1272 return True
1273
1274
1275def build_const_net(
1276 args,
1277 curr_shape,
1278 op_name,
1279 dtype,
1280 excluded_framework_list,
1281 quantized_inference_dtype,
1282 result_name,
1283 seed,
1284 rng,
1285 filter,
1286 unit_test_args,
1287):
1288
1289 if quantized_inference_dtype:
1290 quant_dtype = get_tf_dtype(quantized_inference_dtype)
1291 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
1292 else:
1293 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
1294 test_dir = os.path.join(args.output_dir, test_dir)
1295
1296 # If the operator has an additional function to generate arguments, call it
1297 # here and iterate through the argument list that it generates
1298 op = TF_OP_LIST[op_name]
1299 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1300
    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Default the testing rank range to (1, 4).
        rank_lo = 1
        rank_hi = 4

    if len(curr_shape) not in range(rank_lo, rank_hi + 1):
        return

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
    for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
1317 [
1318 op_name,
1319 args,
1320 test_dir + desc,
1321 curr_shape,
1322 addl_args,
1323 dtype,
1324 excluded_framework_list,
1325 quantized_inference_dtype,
1326 result_name,
1327 seed,
1328 ]
1329 )
1330
1331
# Python's built-in hash() is not reproducible across runs, so create our own hash
1333def op_name_hash(op_name):
1334 result = 0xDEADBEEF
1335 for ch in op_name:
1336 if result & 1:
1337 result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
1338 else:
1339 result = (ord(ch) << 24) ^ (result >> 1)
1340
1341 return result
1342
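# For example, op_name_hash("add") yields the same value on every run (unlike the
# built-in hash(), which is salted per process), so the RNG seed derived from it
# in generate_op_tests keeps the generated tests stable across runs.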
1343
1344def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):
1345
1346 if not args.quiet:
1347 print(
1348 "Generating tests for {} ".format(
1349 op_name
1350 )
1351 )
1352
1353 op = TF_OP_LIST[op_name]
1354
1355 # Seed the RNG so that we get the same random tests for each test each time
1356 # If the number of tests for a given generation function changes, the tests
1357 # for that operator may also change accordingly, but this will at least keep
1358 # down churn across operators.
1359
1360 bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
1361 np.int32
1362 ).max
1363 rng = np.random.default_rng(bounded_hash_val)
1364
    # op["types"] is either a list of data types, or a dictionary with "tf" and
    # "tflite" as keys and the data types to test under each framework as values
1367
1368 if isinstance(op["types"], dict):
1369 try:
1370 tf_dtypes = op["types"]["tf"]
1371 except KeyError:
1372 tf_dtypes = []
1373 try:
1374 tflite_dtypes = op["types"]["tflite"]
1375 except KeyError:
1376 tflite_dtypes = []
1377 elif isinstance(op["types"], list):
1378 tf_dtypes = op["types"]
1379 tflite_dtypes = op["types"]
1380
1381 tf_nonquantized_dtypes = tf_dtypes # tf doesn't support quantized data types
1382 tflite_quantized_dtypes = []
1383 tflite_nonquantized_dtypes = []
1384 for dtype in tflite_dtypes:
1385 if isinstance(dtype, QuantType):
1386 tflite_quantized_dtypes.append(dtype)
1387 else:
1388 tflite_nonquantized_dtypes.append(dtype)
1389
1390 nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
1391 set(tflite_nonquantized_dtypes)
1392 )
1393 nonquantized_dtypes = list(nonquantized_dtypes_set)
1394 quantized_dtypes = tflite_quantized_dtypes
1395
1396 # populate non quantized unit test arguments
1397 for dtype in nonquantized_dtypes:
1398
1399 excluded_framework_set = set(ALL_FRAMEWORKS)
1400 if dtype in tf_nonquantized_dtypes:
1401 excluded_framework_set.remove("tf")
1402 if dtype in tflite_nonquantized_dtypes:
1403 excluded_framework_set.remove("tflite")
1404 excluded_framework_list = list(excluded_framework_set)
1405
1406 for curr_shape in shape_list:
1407 build_const_net(
1408 args,
1409 curr_shape,
1410 op_name,
1411 dtype,
1412 excluded_framework_list,
1413 None,
1414 result_name,
1415 bounded_hash_val,
1416 rng,
1417 filter,
1418 unit_test_args,
1419 )
1420
1421 # populate quantized unit test arguments
    # 'tf' must be excluded and the source dtype must be tf.float32
1423 for dtype in quantized_dtypes:
1424 for curr_shape in shape_list:
1425 build_const_net(
1426 args,
1427 curr_shape,
1428 op_name,
1429 tf.float32,
1430 ["tf"],
1431 dtype,
1432 result_name,
1433 bounded_hash_val,
1434 rng,
1435 filter,
1436 unit_test_args,
1437 )
1438
1439 return unit_test_args
1440
1441
1442def createDynamicOpLists():
1443 """The templated operators are conv2d-style operators with a number of kernel
1444 sizes. Since the operator is unchanged, we generate the range of kernel
1445 sizes here in this loop and remove the original templates from the list.
1446
1447 This could be expanded to non-conv2d-style operators in the future."""
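    # For example, "conv2d_TEMPLATE" below expands to "conv2d_1x1", "conv2d_3x3" and
    # "conv2d_5x5", each copy getting its "filter" entry set and "template" cleared.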
1448
1449 # Dynamically create op lists for convolutions with a list of kernel sizes
1450 KERNELS = [
1451 [1, 1],
1452 [3, 3],
1453 [5, 5],
1454 ]
1455
    # dim = [D, H, W]
    KERNELS_3D = [
        [1, 1, 1],
        [2, 3, 3],
        [3, 5, 5],
    ]

    TEMPLATE_LIST = [
1464 "conv2d",
1465 "conv2d_bias",
1466 "conv2d_relu",
1467 "conv2d_relu6",
1468 "conv2d_relu_n1_to_1",
1469 "conv2d_tanh",
1470 "depthwise_conv2d",
1471 "depthwise_conv2d_bias",
1472 "transpose_conv2d",
1473 ]
1474
    TEMPLATE_LIST_CONV3D = [
        "conv3d",
        "conv3d_bias",
    ]

    for t in TEMPLATE_LIST:
1481 for k in KERNELS:
1482 testName = "{}_{}x{}".format(t, k[0], k[1])
1483 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1484 TF_OP_LIST[testName]["filter"] = k
1485 TF_OP_LIST[testName]["template"] = False
1486
    # The operators above only support 2-D kernels, so the 3-D convolutions are expanded separately.
1488 for t in TEMPLATE_LIST_CONV3D:
1489 for k in KERNELS_3D:
1490 testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
1491 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1492 TF_OP_LIST[testName]["filter"] = k
1493 TF_OP_LIST[testName]["template"] = False
1494
    # Delete any templates after having created any dynamic ops
1496 # This is a two-pass operation because it's bad practice to delete
1497 # keys from dictionaries while iterating
1498 keyList = []
1499 for k in TF_OP_LIST:
1500 try:
1501 if TF_OP_LIST[k]["template"]:
1502 keyList.append(k)
1503 continue
1504 except KeyError:
1505 pass
1506
1507 for k in keyList:
1508 del TF_OP_LIST[k]
1509
1510
1511def main():
1512 parser = argparse.ArgumentParser()
1513 parser.add_argument(
1514 "--seed", dest="random_seed", default=42, type=int, help="Random seed"
1515 )
1516 parser.add_argument(
1517 "--random-shapes",
1518 dest="random_shapes",
1519 default=0,
1520 type=int,
1521 help=(
1522 "Use N random shapes of each rank for generating tests,"
1523 "seeded with random seed"
1524 ),
1525 )
1526 parser.add_argument(
1527 "-o",
1528 "--output-dir",
1529 dest="output_dir",
1530 default=".",
1531 type=str,
1532 help="Test output directory path prefix",
1533 )
1534 parser.add_argument(
1535 "-q",
1536 "--quiet",
1537 dest="quiet",
1538 default=False,
1539 action="store_true",
1540 help="Do not print test names",
1541 )
1542 parser.add_argument(
1543 "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
1544 )
1545 parser.add_argument(
1546 "-m",
1547 "--tflite-kernel-mode",
1548 dest="tflite_kernel_mode",
1549 type=str,
1550 choices=["reference", "optimized"],
1551 default="reference",
1552 help="TFLite interpreter kernel mode",
1553 )
1554 parser.add_argument(
1555 "--num-samples",
1556 dest="num_samples",
1557 default=200,
1558 type=int,
1559 help="Number of input samples for post-training quantization",
1560 )
1561 parser.add_argument(
1562 "--filter",
1563 dest="filter",
1564 default="",
1565 type=str,
1566 help="Filter test names by this expression",
1567 )
1568 args = parser.parse_args()
1569
1570 # Turn the filter into a re object if present
1571 filter = None
1572 if args.filter != "":
1573 filter = re.compile(args.filter)
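    # e.g. --filter "conv2d.*1x1" restricts generation to tests whose names match
    # that regular expression (see the test_name check in build_const_net).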
1574
1575 # Autodetect CPU count
1576 if args.jobs <= 0:
1577 args.jobs = os.cpu_count()
1578
1579 # Disable TF info messages
1580 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
1581
1582 try:
1583 os.makedirs(args.output_dir)
1584 except FileExistsError:
1585 pass
1586
1587 if args.random_shapes:
1588 gen_rand_shapes(args)
1589
1590 # Build dynamic ops
1591 createDynamicOpLists()
1592
1593 # Generate the test list and arguments to run_unit_test()
1594 unit_test_args = []
1595
1596 for op in TF_OP_LIST:
1597 generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)
1598
1599 errors = 0
1600 for t in unit_test_args:
1601 if not run_unit_test(*t):
1602 errors = errors + 1
1603
1604 if not args.quiet:
1605 print("\nAll tasks done - with {} errors".format(errors))
1606
1607 return 1 if errors else 0
1608
1609
1610if __name__ == "__main__":
1611 exit(main())